diff --git a/.eslintignore b/.eslintignore
index 1007fbc1a4..4bb33f1bd5 100644
--- a/.eslintignore
+++ b/.eslintignore
@@ -24,8 +24,6 @@
 **/node_modules/**
 **/extensions/**/out/**
 **/extensions/**/build/**
-/extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts
-/extensions/big-data-cluster/src/bigDataCluster/controller/clusterApiGenerated2.ts
 **/extensions/**/colorize-fixtures/**
 **/extensions/html-language-features/server/lib/jquery.d.ts
 /extensions/markdown-language-features/media/**
diff --git a/.eslintrc.json b/.eslintrc.json
index 481e18997c..332639a702 100755
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -1147,7 +1147,6 @@
     "extensions/azuremonitor/src/prompts/**",
     "extensions/azuremonitor/src/typings/findRemove.d.ts",
     "extensions/kusto/src/prompts/**",
-    "extensions/mssql/src/hdfs/webhdfs.ts",
     "extensions/mssql/src/prompts/**",
     "extensions/mssql/src/typings/bufferStreamReader.d.ts",
     "extensions/mssql/src/typings/findRemove.d.ts",
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 489be31fe8..088411705a 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -6,7 +6,6 @@
 /extensions/arc/ @Charles-Gagnon @swells @candiceye
 /extensions/azcli/ @Charles-Gagnon @swells @candiceye
 /extensions/azurecore/ @cssuh @cheenamalhotra
-/extensions/big-data-cluster/ @Charles-Gagnon
 /extensions/dacpac/ @kisantia
 /extensions/notebook @azure-data-studio-notebook-devs
 /extensions/query-history/ @Charles-Gagnon
diff --git a/build/azure-pipelines/win32/sql-product-test-win32.yml b/build/azure-pipelines/win32/sql-product-test-win32.yml
index d5efdd4717..004023454e 100644
--- a/build/azure-pipelines/win32/sql-product-test-win32.yml
+++ b/build/azure-pipelines/win32/sql-product-test-win32.yml
@@ -41,7 +41,7 @@ steps:
   inputs:
     azureSubscription: 'ClientToolsInfra_670062 (88d5392f-a34f-4769-b405-f597fc533613)'
     KeyVaultName: SqlToolsSecretStore
-    SecretsFilter: 'ads-integration-test-azure-server,ads-integration-test-azure-server-password,ads-integration-test-azure-server-username,ads-integration-test-bdc-server,ads-integration-test-bdc-server-password,ads-integration-test-bdc-server-username,ads-integration-test-standalone-server,ads-integration-test-standalone-server-password,ads-integration-test-standalone-server-username,ads-integration-test-standalone-server-2019,ads-integration-test-standalone-server-password-2019,ads-integration-test-standalone-server-username-2019'
+    SecretsFilter: 'ads-integration-test-azure-server,ads-integration-test-azure-server-password,ads-integration-test-azure-server-username,ads-integration-test-standalone-server,ads-integration-test-standalone-server-password,ads-integration-test-standalone-server-username,ads-integration-test-standalone-server-2019,ads-integration-test-standalone-server-password-2019,ads-integration-test-standalone-server-username-2019'
 - powershell: |
     . build/azure-pipelines/win32/exec.ps1
@@ -54,9 +54,6 @@ steps:
   condition: and(succeeded(), and(eq(variables['RUN_TESTS'], 'true'), ne(variables['RUN_INTEGRATION_TESTS'], 'false')))
   displayName: Run stable tests
   env:
-    BDC_BACKEND_USERNAME: $(ads-integration-test-bdc-server-username)
-    BDC_BACKEND_PWD: $(ads-integration-test-bdc-server-password)
-    BDC_BACKEND_HOSTNAME: $(ads-integration-test-bdc-server)
     STANDALONE_SQL_USERNAME: $(ads-integration-test-standalone-server-username)
     STANDALONE_SQL_PWD: $(ads-integration-test-standalone-server-password)
     STANDALONE_SQL: $(ads-integration-test-standalone-server)
diff --git a/build/filters.js b/build/filters.js
index 594741196f..708bf25078 100644
--- a/build/filters.js
+++ b/build/filters.js
@@ -152,8 +152,6 @@ module.exports.indentationFilter = [
     '!extensions/sql-database-projects/src/test/baselines/*.json',
     '!extensions/sql-database-projects/src/test/baselines/*.sqlproj',
     '!extensions/sql-database-projects/BuildDirectory/SystemDacpacs/**',
-    '!extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts',
-    '!extensions/big-data-cluster/src/bigDataCluster/controller/clusterApiGenerated2.ts',
     '!resources/linux/snap/electron-launch',
     '!extensions/markdown-language-features/media/*.js',
     '!extensions/simple-browser/media/*.js',
@@ -202,7 +200,6 @@ module.exports.copyrightFilter = [
     '!extensions/import/flatfileimportservice/**',
     '!extensions/kusto/src/prompts/**',
     '!extensions/mssql/sqltoolsservice/**',
-    '!extensions/mssql/src/hdfs/webhdfs.ts',
     '!extensions/mssql/src/prompts/**',
     '!extensions/notebook/resources/jupyter_config/**',
     '!extensions/notebook/src/intellisense/text.ts',
@@ -257,8 +254,6 @@ module.exports.tsFormattingFilter = [
     '!extensions/html-language-features/server/lib/jquery.d.ts',
     // {{SQL CARBON EDIT}}
-    '!extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts',
-    '!extensions/big-data-cluster/src/bigDataCluster/controller/tokenApiGenerated.ts',
     '!src/vs/workbench/services/themes/common/textMateScopeMatcher.ts', // skip this because we have no plans on touching this and its not ours
     '!src/vs/workbench/contrib/extensions/browser/extensionRecommendationsService.ts', // skip this because known issue
     '!build/**/*'
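The filter lists in build/filters.js above are plain glob arrays: broad includes followed by `!`-prefixed exclusions, applied together. A minimal sketch of how such a list resolves, assuming a micromatch-style matcher (the same negation semantics gulp's filter plugins use); the file list is hypothetical:

const micromatch = require('micromatch');

// Two hypothetical candidate paths.
const files = [
    'extensions/mssql/src/main.ts',
    'extensions/mssql/src/prompts/question.ts'
];

// Include everything, then carve out the vendored paths,
// mirroring the copyrightFilter entries edited above.
const copyrightFilter = ['**', '!extensions/mssql/src/prompts/**'];

// Only files matching an include and no exclusion survive.
console.log(micromatch(files, copyrightFilter));
// -> [ 'extensions/mssql/src/main.ts' ]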
diff --git a/build/gulpfile.reh.js b/build/gulpfile.reh.js
index 45c216d03a..3a669fce9f 100644
--- a/build/gulpfile.reh.js
+++ b/build/gulpfile.reh.js
@@ -448,7 +448,7 @@ function packagePkgTask(platform, arch, pkgTarget) {
     // rebuild extensions that contain native npm modules or have conditional webpack rules
     // when building with the web .yarnrc settings (e.g. runtime=node, etc.)
     // this is needed to have the correct module set published with the desired ABI
-    const rebuildExtensions = ['big-data-cluster', 'mssql', 'notebook'];
+    const rebuildExtensions = ['mssql', 'notebook'];
     const EXTENSIONS = path.join(REPO_ROOT, 'extensions');
     function exec(cmdLine, cwd) {
         console.log(cmdLine);
diff --git a/build/gulpfile.vscode.js b/build/gulpfile.vscode.js
index 6afe661808..dcb4277c5a 100644
--- a/build/gulpfile.vscode.js
+++ b/build/gulpfile.vscode.js
@@ -125,7 +125,6 @@ const extensionsFilter = filter([
     '**/azcli.xlf',
     '**/azurecore.xlf',
     '**/azurehybridtoolkit.xlf',
-    '**/big-data-cluster.xlf',
     '**/cms.xlf',
     '**/dacpac.xlf',
     '**/git.xlf',
diff --git a/build/lib/extensions.ts b/build/lib/extensions.ts
index be2e1d7b30..f72cd0dea4 100644
--- a/build/lib/extensions.ts
+++ b/build/lib/extensions.ts
@@ -328,7 +328,6 @@ export const vscodeExternalExtensions = [

 // extensions that require a rebuild since they have native parts
 const rebuildExtensions = [
-    'big-data-cluster',
     'mssql'
 ];
diff --git a/build/npm/dirs.js b/build/npm/dirs.js
index eda24d884a..d80d0c98b9 100644
--- a/build/npm/dirs.js
+++ b/build/npm/dirs.js
@@ -17,7 +17,6 @@ exports.dirs = [
     'extensions/azurecore',
     'extensions/azurehybridtoolkit',
     'extensions/azuremonitor',
-    'extensions/big-data-cluster',
     'extensions/cms',
     'extensions/configuration-editing',
     'extensions/dacpac',
diff --git a/extensions/big-data-cluster/.eslintrc.json b/extensions/big-data-cluster/.eslintrc.json
deleted file mode 100644
index 77cd8b61f8..0000000000
--- a/extensions/big-data-cluster/.eslintrc.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "parserOptions": {
-        "project": "./extensions/big-data-cluster/tsconfig.json"
-    },
-    "rules": {
-        // Disabled until the issues can be fixed
-        "@typescript-eslint/explicit-function-return-type": ["off"]
-    }
-}
diff --git a/extensions/big-data-cluster/.gitignore b/extensions/big-data-cluster/.gitignore
deleted file mode 100644
index dfacd4d5b4..0000000000
--- a/extensions/big-data-cluster/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.vsix
\ No newline at end of file
diff --git a/extensions/big-data-cluster/.vscodeignore b/extensions/big-data-cluster/.vscodeignore
deleted file mode 100644
index 8c2a960c91..0000000000
--- a/extensions/big-data-cluster/.vscodeignore
+++ /dev/null
@@ -1,14 +0,0 @@
-.gitignore
-instructions.txt
-src/**
-out/**
-extension.webpack.config.js
-tsconfig.json
-yarn.lock
-
-node_modules
-!node_modules/@microsoft/ads-kerberos/package.json
-!node_modules/@microsoft/ads-kerberos/LICENSE
-!node_modules/@microsoft/ads-kerberos/lib
-!node_modules/@microsoft/ads-kerberos/index.js
-!node_modules/@microsoft/ads-kerberos/build/Release/kerberos.node
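Both rebuild lists edited above exist for the same reason the deleted .vscodeignore whitelists the prebuilt kerberos binary: extensions with native npm modules must be reinstalled against the target runtime so the shipped binaries match its ABI. A sketch of that rebuild step, with assumed paths and flags rather than the gulpfiles' exact code:

const cp = require('child_process');
const path = require('path');

const REPO_ROOT = path.join(__dirname, '..');
// 'big-data-cluster' was dropped from this list by the change above.
const rebuildExtensions = ['mssql', 'notebook'];

for (const extension of rebuildExtensions) {
    const cwd = path.join(REPO_ROOT, 'extensions', extension);
    console.log(`rebuilding native modules in ${cwd}`);
    // Re-run the install so native modules compile for the target ABI.
    cp.execSync('yarn install --force', { cwd, stdio: 'inherit' });
}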
diff --git a/extensions/big-data-cluster/README.md b/extensions/big-data-cluster/README.md
deleted file mode 100644
index 1f8c185a2b..0000000000
--- a/extensions/big-data-cluster/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Microsoft SQL Server Big Data Cluster Extension for Azure Data Studio
-
-Welcome to Microsoft SQL Server Big Data Cluster Extension for Azure Data Studio!
-
-## Code of Conduct
-
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-
-## Privacy Statement
-
-The [Microsoft Enterprise and Developer Privacy Statement](https://privacy.microsoft.com/privacystatement) describes the privacy statement of this software.
-
-## License
-
-Copyright (c) Microsoft Corporation. All rights reserved.
-
-Licensed under the [Source EULA](https://raw.githubusercontent.com/Microsoft/azuredatastudio/main/LICENSE.txt).
diff --git a/extensions/big-data-cluster/extension.webpack.config.js b/extensions/big-data-cluster/extension.webpack.config.js
deleted file mode 100644
index 248726ff49..0000000000
--- a/extensions/big-data-cluster/extension.webpack.config.js
+++ /dev/null
@@ -1,20 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-//@ts-check
-
-'use strict';
-
-const withDefaults = require('../shared.webpack.config');
-
-module.exports = withDefaults({
-    context: __dirname,
-    entry: {
-        extension: './src/extension.ts'
-    },
-    externals: {
-        '@microsoft/ads-kerberos': 'commonjs @microsoft/ads-kerberos'
-    }
-});
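The deleted config above bundles the extension with webpack but marks `@microsoft/ads-kerberos` as a `commonjs` external: the package stays out of the bundle, and webpack emits a plain `require` at that boundary. Conceptually:

// What a 'commonjs' external reduces to inside the emitted bundle
// (webpack generates this wrapper; you never write it yourself):
module.exports = require('@microsoft/ads-kerberos');

This is why the deleted .vscodeignore had to whitelist node_modules/@microsoft/ads-kerberos: the module must still exist on disk inside the packaged extension at runtime.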
diff --git a/extensions/big-data-cluster/images/extension.png b/extensions/big-data-cluster/images/extension.png
deleted file mode 100644
index c86d6d1e00..0000000000
Binary files a/extensions/big-data-cluster/images/extension.png and /dev/null differ
diff --git a/extensions/big-data-cluster/images/sql_bdc.svg b/extensions/big-data-cluster/images/sql_bdc.svg
deleted file mode 100644
index 07e4a1cca4..0000000000
--- a/extensions/big-data-cluster/images/sql_bdc.svg
+++ /dev/null
@@ -1,38 +0,0 @@
[38 lines of SVG markup not preserved in this copy of the diff]
diff --git a/extensions/big-data-cluster/instructions.txt b/extensions/big-data-cluster/instructions.txt
deleted file mode 100644
index 98223b140c..0000000000
--- a/extensions/big-data-cluster/instructions.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-How to update the Swagger-generated API used to contact the controller
-
-## BdcRouter API:
-1. You need to get the API specification. Long-term you should be able to get it from the server,
-but for now go to the internal repository and find the checked-in SwaggerClient.yaml there.
-2. Copy the content from there and paste it into https://editor.swagger.io/
-3. Choose Generate Client, and choose Typescript-Node as the client to generate
-4. This will download a zip file. Open it and copy the contents of api.ts
-5. Copy this content to apiGenerated.ts
--- keep the copyright header and everything above the `let defaultBasePath = xyz` line,
--- override the rest of the file
-6. Format the apiGenerated.ts file so it passes gulp hygiene
-
-## TokenRouter and other APIs:
-1. Get the API spec. This is available from a cluster at https://<ip>:30080/docs/swagger.json, where <ip> is the controller IP address.
-2. Copy the content from there and convert it from OpenAPI 3.0 to Swagger 2.0 so we can use the Typescript-Node client generated by https://editor.swagger.io/.
-Various converter tools are available online. Alternatively, we might be able to use a different generator that has this client type (e.g. the npm package @openapitools/openapi-generator-cli), but some require a Java install.
-3. Copy the converted Swagger 2.0 spec into https://editor.swagger.io/
-4. Choose Generate Client, and choose Typescript-Node as the client to generate
-5. This will download a zip file. Open it and copy the contents of api.ts
-6. Copy this content to tokenApiGenerated.ts
--- keep the copyright header and everything above the `let defaultBasePath = xyz` line,
--- override the rest of the file
-7. Format the tokenApiGenerated.ts file so it passes gulp hygiene
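The manual steps in the deleted instructions.txt can also be scripted with the @openapitools/openapi-generator-cli package the file itself mentions, instead of generating the client by hand at https://editor.swagger.io/. A sketch, assuming the CLI (and the Java runtime it needs) is installed; the spec path is hypothetical:

const { execSync } = require('child_process');

// 'typescript-node' is the same client flavor the Swagger editor produces.
execSync(
    'npx openapi-generator-cli generate' +
    ' -i swagger.json' +      // converted Swagger 2.0 spec from the controller
    ' -g typescript-node' +
    ' -o ./generated',
    { stdio: 'inherit' }
);
// Then copy generated/api.ts over tokenApiGenerated.ts, keeping the copyright
// header and everything above the `let defaultBasePath = ...` line.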
diff --git a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-aks.ipynb b/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-aks.ipynb
deleted file mode 100644
index e5df66284a..0000000000
--- a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-aks.ipynb
+++ /dev/null
@@ -1,430 +0,0 @@
-{
-    "metadata": {
-        "kernelspec": {
-            "name": "python3",
-            "display_name": "Python 3"
-        },
-        "language_info": {
-            "name": "python",
-            "version": "3.6.6",
-            "mimetype": "text/x-python",
-            "codemirror_mode": {
-                "name": "ipython",
-                "version": 3
-            },
-            "pygments_lexer": "ipython3",
-            "nbconvert_exporter": "python",
-            "file_extension": ".py"
-        }
-    },
-    "nbformat_minor": 2,
-    "nbformat": 4,
-    "cells": [
-        {
-            "cell_type": "markdown",
-            "source": [
-                "![Microsoft](https://raw.githubusercontent.com/microsoft/azuredatastudio/main/extensions/resource-deployment/images/microsoft-small-logo.png)\n",
-                " \n",
-                "## Create Azure Kubernetes Service cluster and deploy SQL Server 2019 Big Data Cluster\n",
-                " \n",
-                "This notebook walks through the process of creating a new Azure Kubernetes Service cluster first, and then deploys a SQL Server 2019 Big Data Cluster on the newly created AKS cluster.\n",
-                " \n",
-                "* Follow the instructions in the **Prerequisites** cell to install the tools if not already installed.\n",
-                "* The **Required information** cell will check for the password and prompt you if it is not set in the environment variable. The password will be used to access the cluster controller, SQL Server, and Knox.\n",
-                "\n",
-                "Please press the \"Run all\" button to run the notebook"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "4f6bc3bc-3592-420a-b534-384011189005"
-            }
-        },
-        {
-            "cell_type": "markdown",
-            "source": [
-                "### **Prerequisites**\n",
-                "Ensure the following tools are installed and added to PATH before proceeding.\n",
-                "\n",
-                "|Tools|Description|Installation|\n",
-                "|---|---|---|\n",
-                "|Azure CLI |Command-line tool for managing Azure services. Used to create AKS cluster | [Installation](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest) |\n",
-                "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n",
-                "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "d949980e-ad3f-4d02-ae84-7e4fbb19a087"
-            }
-        },
-        {
-            "cell_type": "markdown",
-            "source": [
-                "### **Setup**"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "a56d3413-a730-4997-b5c2-c8abd972757e"
-            }
-        },
-        {
-            "cell_type": "code",
-            "source": [
-                "import pandas,sys,os,json,html,getpass,time\n",
-                "pandas_version = pandas.__version__.split('.')\n",
-                "pandas_major = int(pandas_version[0])\n",
-                "pandas_minor = int(pandas_version[1])\n",
-                "pandas_patch = int(pandas_version[2])\n",
-                "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\n",
-                "    sys.exit('Please upgrade the Notebook dependency before you can proceed, you can do it by running the \"Reinstall Notebook dependencies\" command in command palette (View menu -> Command Palette…).')\n",
-                "def run_command(command):\n",
-                "    print(\"Executing: \" + command)\n",
-                "    !{command}\n",
-                "    if _exit_code != 0:\n",
-                "        sys.exit(f'Command execution failed with exit code: {str(_exit_code)}.\\n\\t{command}\\n')\n",
-                "    print(f'Successfully executed: {command}')"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "326645cf-022a-47f2-8aff-37de71da8955",
-                "tags": [
-                    "hide_input"
-                ]
-            },
-            "outputs": [],
-            "execution_count": null
-        },
-        {
-            "cell_type": "markdown",
-            "source": [
-                "### **Set variables**\n",
-                "Generated by Azure Data Studio using the values collected in the Deploy Big Data Cluster wizard"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "8716915b-1439-431b-ab0a-0221ef94cb7f"
-            }
-        },
-        {
-            "cell_type": "markdown",
-            "source": [
-                "### **Check dependencies**"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "db8b1e21-eb2c-4c35-b973-bc4ef38bb1d0"
-            }
-        },
-        {
-            "cell_type": "code",
-            "source": [
-                "run_command('kubectl version --client=true')\n",
-                "run_command('azdata --version')\n",
-                "run_command('az --version')"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "9361deaf-28b1-4d02-912d-2011cae97e8a",
-                "tags": [
-                    "hide_input"
-                ]
-            },
-            "outputs": [],
-            "execution_count": null
-        },
-        {
-            "cell_type": "markdown",
-            "source": [
-                "### **Required information**"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "720c200c-322a-49dd-9aa3-8bf7946aa251"
-            }
-        },
-        {
-            "cell_type": "code",
-            "source": [
-                "invoked_by_wizard = \"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\" in os.environ\n",
-                "if invoked_by_wizard:\n",
-                "    mssql_password = os.environ[\"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\"]\n",
-                "else:\n",
-                "    mssql_password = getpass.getpass(prompt = 'SQL Server 2019 Big Data Cluster controller password')\n",
-                "    if mssql_password == \"\":\n",
-                "        sys.exit(f'Password is required.')\n",
-                "    confirm_password = getpass.getpass(prompt = 'Confirm password')\n",
-                "    if mssql_password != confirm_password:\n",
-                "        sys.exit(f'Passwords do not match.')\n",
-                "print('You can also use the controller password to access Knox and SQL Server.')"
-            ],
-            "metadata": {
-                "azdata_cell_guid": "17e5d087-7128-4d02-8c16-fe1ddee675e5",
-                "tags": [
-                    "hide_input"
-                ]
-            },
-            "outputs": [],
-            "execution_count": null
-        },
"tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Login to Azure**\n", - "\n", - "This will open a web browser window to enable credentials to be entered. If this cells is hanging forever, it might be because your Web browser windows is waiting for you to enter your Azure credentials!\n", - "" - ], - "metadata": { - "azdata_cell_guid": "baddf2d9-93ee-4c42-aaf1-b42116bb1912" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'az login')" - ], - "metadata": { - "azdata_cell_guid": "8f1404a6-216d-49fb-b6ad-81beeea50083", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "\n", - "### **Set active Azure subscription**" - ], - "metadata": { - "azdata_cell_guid": "230dc0f1-bf6e-474a-bfaa-aae6f8aad12e" - } - }, - { - "cell_type": "code", - "source": [ - "if azure_subscription_id != \"\":\n", - " run_command(f'az account set --subscription {azure_subscription_id}')\n", - "else:\n", - " print('Using the default Azure subscription', {azure_subscription_id})\n", - "run_command(f'az account show')" - ], - "metadata": { - "azdata_cell_guid": "ab230931-2e99-483b-a229-3847684a8c1c", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create Azure resource group**" - ], - "metadata": { - "azdata_cell_guid": "d51db914-f484-489f-990d-72edb3065068" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'az group create --name {azure_resource_group} --location {azure_region}')" - ], - "metadata": { - "azdata_cell_guid": "7c53eb23-c327-41bf-8936-bd34a02ebdd5", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create AKS cluster**" - ], - "metadata": { - "azdata_cell_guid": "818eb705-71e2-4013-8420-44886a5468b2" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'az aks create --name {aks_cluster_name} --resource-group {azure_resource_group} --generate-ssh-keys --node-vm-size {azure_vm_size} --node-count {azure_vm_count}')" - ], - "metadata": { - "azdata_cell_guid": "3cea1da0-0c18-4030-a5aa-79bc98a5a14d", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set the new AKS cluster as current context**" - ], - "metadata": { - "azdata_cell_guid": "5ade8453-5e71-478f-b6b6-83c55626243d" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'az aks get-credentials --resource-group {azure_resource_group} --name {aks_cluster_name} --admin --overwrite-existing')" - ], - "metadata": { - "azdata_cell_guid": "9ccb9adf-1cf6-4dcb-8bd9-7ae9a85c2437", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create deployment configuration files**" - ], - "metadata": { - "azdata_cell_guid": "57eb69fb-c68f-4ba8-818d-ffbaa0bc7aec" - } - }, - { - "cell_type": "code", - "source": [ - "mssql_target_profile = 'ads-bdc-custom-profile'\n", - "if not os.path.exists(mssql_target_profile):\n", - " os.mkdir(mssql_target_profile)\n", - "bdcJsonObj = json.loads(bdc_json)\n", - "controlJsonObj = json.loads(control_json)\n", - "bdcJsonFile = open(f'{mssql_target_profile}/bdc.json', 'w')\n", - "bdcJsonFile.write(json.dumps(bdcJsonObj, indent = 4))\n", - "bdcJsonFile.close()\n", - 
"controlJsonFile = open(f'{mssql_target_profile}/control.json', 'w')\n", - "controlJsonFile.write(json.dumps(controlJsonObj, indent = 4))\n", - "controlJsonFile.close()\n", - "print(f'Created deployment configuration folder: {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "3fd73c04-8a79-4d08-9049-1dad30265558", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "6e82fad8-0fd0-4952-87ce-3fea1edd98cb" - } - }, - { - "cell_type": "code", - "source": [ - "print (f'Creating SQL Server 2019 Big Data Cluster: {mssql_cluster_name} using configuration {mssql_target_profile}')\n", - "os.environ[\"ACCEPT_EULA\"] = 'yes'\n", - "os.environ[\"AZDATA_USERNAME\"] = mssql_username\n", - "os.environ[\"AZDATA_PASSWORD\"] = mssql_password\n", - "if os.name == 'nt':\n", - " print(f'If you don\\'t see output produced by azdata, you can run the following command in a terminal window to check the deployment status:\\n\\t{os.environ[\"AZDATA_NB_VAR_KUBECTL\"]} get pods -n {mssql_cluster_name} ')\n", - "run_command(f'azdata bdc create -c {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "c43ea026-ca5e-4e2a-8602-fcc786354168", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Login to SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "9c5428f4-08b9-4799-a35d-867c91dc29fb" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'azdata login --namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "5120c387-1088-435b-856e-e59f147c45a2", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Show SQL Server 2019 Big Data Cluster endpoints**" - ], - "metadata": { - "azdata_cell_guid": "97974eda-e108-4c21-a58e-c6bb58f14ef1" - } - }, - { - "cell_type": "code", - "source": [ - "from IPython.display import *\n", - "pandas.set_option('display.max_colwidth', -1)\n", - "cmd = f'azdata bdc endpoint list'\n", - "cmdOutput = !{cmd}\n", - "endpoints = json.loads(''.join(cmdOutput))\n", - "endpointsDataFrame = pandas.DataFrame(endpoints)\n", - "endpointsDataFrame.columns = [' '.join(word[0].upper() + word[1:] for word in columnName.split()) for columnName in endpoints[0].keys()]\n", - "display(HTML(endpointsDataFrame.to_html(index=False, render_links=True)))" - ], - "metadata": { - "azdata_cell_guid": "9a5d0aef-a8da-4845-b470-d714435f0304", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Connect to SQL Server Master instance in Azure Data Studio**\n", - "Click the link below to connect to the SQL Server Master instance of the SQL Server 2019 Big Data Cluster." - ], - "metadata": { - "azdata_cell_guid": "4a49b629-bd7a-43ba-bf18-6cdc0737b0f9" - } - }, - { - "cell_type": "code", - "source": [ - "sqlEndpoints = [x for x in endpoints if x['name'] == 'sql-server-master']\n", - "if sqlEndpoints and len(sqlEndpoints) == 1:\n", - " connectionParameter = '{\"serverName\":\"' + sqlEndpoints[0]['endpoint'] + '\",\"providerName\":\"MSSQL\",\"authenticationType\":\"SqlLogin\",\"userName\":' + json.dumps(mssql_username) + ',\"password\":' + json.dumps(mssql_password) + '}'\n", - " display(HTML('
Click here to connect to SQL Server Master instance
'))\n", - " display(HTML('
NOTE: The SQL Server password is included in this link, you may want to clear the results of this code cell before saving the notebook.'))\n", - "else:\n", - " sys.exit('Could not find the SQL Server Master instance endpoint.')" - ], - "metadata": { - "azdata_cell_guid": "1c9d1f2c-62ba-4070-920a-d30b67bdcc7c", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - } - ] -} diff --git a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aks.ipynb b/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aks.ipynb deleted file mode 100644 index 1ea0a78eb5..0000000000 --- a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aks.ipynb +++ /dev/null @@ -1,329 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python", - "version": "3.6.6", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - } - }, - "nbformat_minor": 2, - "nbformat": 4, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "![Microsoft](https://raw.githubusercontent.com/microsoft/azuredatastudio/main/extensions/resource-deployment/images/microsoft-small-logo.png)\n", - " \n", - "## Deploy SQL Server 2019 Big Data Cluster on an existing Azure Kubernetes Service (AKS) cluster\n", - " \n", - "This notebook walks through the process of deploying a SQL Server 2019 Big Data Cluster on an existing AKS cluster.\n", - " \n", - "* Follow the instructions in the **Prerequisites** cell to install the tools if not already installed.\n", - "* The **Required information** will check and prompt you for password if it is not set in the environment variable. 
The password can be used to access the cluster controller, SQL Server, and Knox.\n", - "\n", - "Please press the \"Run all\" button to run the notebook" - ], - "metadata": { - "azdata_cell_guid": "82e60c1a-7acf-47ee-877f-9e85e92e11da" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Prerequisites** \n", - "Ensure the following tools are installed and added to PATH before proceeding.\n", - " \n", - "|Tools|Description|Installation|\n", - "|---|---|---|\n", - "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n", - "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |" - ], - "metadata": { - "azdata_cell_guid": "714582b9-10ee-409e-ab12-15a4825c9471" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Setup**" - ], - "metadata": { - "azdata_cell_guid": "e3dd8e75-e15f-44b4-81fc-1f54d6f0b1e2" - } - }, - { - "cell_type": "code", - "source": [ - "import pandas,sys,os,json,html,getpass,time\n", - "pandas_version = pandas.__version__.split('.')\n", - "pandas_major = int(pandas_version[0])\n", - "pandas_minor = int(pandas_version[1])\n", - "pandas_patch = int(pandas_version[2])\n", - "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\n", - " sys.exit('Please upgrade the Notebook dependency before you can proceed, you can do it by running the \"Reinstall Notebook dependencies\" command in command palette (View menu -> Command Palette…).')\n", - "def run_command(command):\n", - " print(\"Executing: \" + command)\n", - " !{command}\n", - " if _exit_code != 0:\n", - " sys.exit(f'Command execution failed with exit code: {str(_exit_code)}.\\n\\t{command}\\n')\n", - " print(f'Successfully executed: {command}')" - ], - "metadata": { - "azdata_cell_guid": "d973d5b4-7f0a-4a9d-b204-a16480f3940d", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set variables**\n", - "Generated by Azure Data Studio using the values collected in the Deploy Big Data Cluster wizard" - ], - "metadata": { - "azdata_cell_guid": "4b266b2d-bd1b-4565-92c9-3fc146cdce6d" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Check dependencies**" - ], - "metadata": { - "azdata_cell_guid": "2544648b-59c9-4ce5-a3b6-87086e214d4c" - } - }, - { - "cell_type": "code", - "source": [ - "run_command('kubectl version --client=true')\n", - "run_command('azdata --version')" - ], - "metadata": { - "azdata_cell_guid": "691671d7-3f05-406c-a183-4cff7d17f83d", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Required information**" - ], - "metadata": { - "azdata_cell_guid": "0bb02e76-fee8-4dbc-a75b-d5b9d1b187d0" - } - }, - { - "cell_type": "code", - "source": [ - "invoked_by_wizard = \"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\" in os.environ\n", - "if invoked_by_wizard:\n", - " mssql_password = os.environ[\"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\"]\n", - "else:\n", - " mssql_password = getpass.getpass(prompt = 'SQL Server 2019 Big Data Cluster controller password')\n", - " if mssql_password == \"\":\n", - " sys.exit(f'Password is required.')\n", - " confirm_password = 
getpass.getpass(prompt = 'Confirm password')\n", - " if mssql_password != confirm_password:\n", - " sys.exit(f'Passwords do not match.')\n", - "print('You can also use the controller password to access Knox and SQL Server.')" - ], - "metadata": { - "azdata_cell_guid": "e7e10828-6cae-45af-8c2f-1484b6d4f9ac", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set and show current context**" - ], - "metadata": { - "azdata_cell_guid": "127c8042-181f-4862-a390-96e59c181d09" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl config use-context {mssql_cluster_context}')\n", - "run_command('kubectl config current-context')" - ], - "metadata": { - "azdata_cell_guid": "7d1a03d4-1df8-48eb-bff0-0042603b95b1", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create deployment configuration files**" - ], - "metadata": { - "azdata_cell_guid": "138536c3-1db6-428f-9e5c-8269a02fb52e" - } - }, - { - "cell_type": "code", - "source": [ - "mssql_target_profile = 'ads-bdc-custom-profile'\n", - "if not os.path.exists(mssql_target_profile):\n", - " os.mkdir(mssql_target_profile)\n", - "bdcJsonObj = json.loads(bdc_json)\n", - "controlJsonObj = json.loads(control_json)\n", - "bdcJsonFile = open(f'{mssql_target_profile}/bdc.json', 'w')\n", - "bdcJsonFile.write(json.dumps(bdcJsonObj, indent = 4))\n", - "bdcJsonFile.close()\n", - "controlJsonFile = open(f'{mssql_target_profile}/control.json', 'w')\n", - "controlJsonFile.write(json.dumps(controlJsonObj, indent = 4))\n", - "controlJsonFile.close()\n", - "print(f'Created deployment configuration folder: {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "2ff82c8a-4bce-449c-9d91-3ac7dd272021", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "efe78cd3-ed73-4c9b-b586-fdd6c07dd37f" - } - }, - { - "cell_type": "code", - "source": [ - "print (f'Creating SQL Server 2019 Big Data Cluster: {mssql_cluster_name} using configuration {mssql_target_profile}')\n", - "os.environ[\"ACCEPT_EULA\"] = 'yes'\n", - "os.environ[\"AZDATA_USERNAME\"] = mssql_username\n", - "os.environ[\"AZDATA_PASSWORD\"] = mssql_password\n", - "if os.name == 'nt':\n", - " print(f'If you don\\'t see output produced by azdata, you can run the following command in a terminal window to check the deployment status:\\n\\t{os.environ[\"AZDATA_NB_VAR_KUBECTL\"]} get pods -n {mssql_cluster_name} ')\n", - "run_command(f'azdata bdc create -c {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "373947a1-90b9-49ee-86f4-17a4c7d4ca76", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Login to SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "4e026d39-12d4-4c80-8e30-de2b782f2110" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'azdata login --namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "79adda27-371d-4dcb-b867-db025f8162a5", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Show SQL Server 2019 Big Data Cluster endpoints**" - ], - "metadata": { - "azdata_cell_guid": 
"c1921288-ad11-40d8-9aea-127a722b3df8" - } - }, - { - "cell_type": "code", - "source": [ - "from IPython.display import *\n", - "pandas.set_option('display.max_colwidth', -1)\n", - "cmd = f'azdata bdc endpoint list'\n", - "cmdOutput = !{cmd}\n", - "endpoints = json.loads(''.join(cmdOutput))\n", - "endpointsDataFrame = pandas.DataFrame(endpoints)\n", - "endpointsDataFrame.columns = [' '.join(word[0].upper() + word[1:] for word in columnName.split()) for columnName in endpoints[0].keys()]\n", - "display(HTML(endpointsDataFrame.to_html(index=False, render_links=True)))" - ], - "metadata": { - "azdata_cell_guid": "a2202494-fd6c-4534-987d-15c403a5096f", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Connect to SQL Server Master instance in Azure Data Studio**\n", - "Click the link below to connect to the SQL Server Master instance of the SQL Server 2019 Big Data Cluster." - ], - "metadata": { - "azdata_cell_guid": "621863a2-aa61-46f4-a9d0-717f41c009ee" - } - }, - { - "cell_type": "code", - "source": [ - "sqlEndpoints = [x for x in endpoints if x['name'] == 'sql-server-master']\n", - "if sqlEndpoints and len(sqlEndpoints) == 1:\n", - " connectionParameter = '{\"serverName\":\"' + sqlEndpoints[0]['endpoint'] + '\",\"providerName\":\"MSSQL\",\"authenticationType\":\"SqlLogin\",\"userName\":' + json.dumps(mssql_username) + ',\"password\":' + json.dumps(mssql_password) + '}'\n", - " display(HTML('
Click here to connect to SQL Server Master instance
'))\n", - " display(HTML('
NOTE: The SQL Server password is included in this link, you may want to clear the results of this code cell before saving the notebook.'))\n", - "else:\n", - " sys.exit('Could not find the SQL Server Master instance endpoint.')" - ], - "metadata": { - "azdata_cell_guid": "48342355-9d2b-4fa6-b1aa-3bc77d434dfa", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - } - ] -} diff --git a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aro.ipynb b/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aro.ipynb deleted file mode 100644 index b8591d2b6f..0000000000 --- a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-aro.ipynb +++ /dev/null @@ -1,351 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python", - "version": "3.6.6", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - } - }, - "nbformat_minor": 2, - "nbformat": 4, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "![Microsoft](https://raw.githubusercontent.com/microsoft/azuredatastudio/main/extensions/resource-deployment/images/microsoft-small-logo.png)\n", - " \n", - "## Deploy SQL Server 2019 Big Data Cluster on an existing Azure Red Hat OpenShift cluster\n", - " \n", - "This notebook walks through the process of deploying a SQL Server 2019 Big Data Cluster on an existing Azure Red Hat OpenShift cluster.\n", - " \n", - "* Follow the instructions in the **Prerequisites** cell to install the tools if not already installed.\n", - "* The **Required information** will check and prompt you for password if it is not set in the environment variable. 
The password can be used to access the cluster controller, SQL Server, and Knox.\n", - "\n", - "Please press the \"Run all\" button to run the notebook" - ], - "metadata": { - "azdata_cell_guid": "23954d96-3932-4a8e-ab73-da605f99b1a4" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Prerequisites** \n", - "Ensure the following tools are installed and added to PATH before proceeding.\n", - " \n", - "|Tools|Description|Installation|\n", - "|---|---|---|\n", - "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n", - "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |" - ], - "metadata": { - "azdata_cell_guid": "1d7f4c6a-0cb8-4ecc-81c8-544712253a3f" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Setup**" - ], - "metadata": { - "azdata_cell_guid": "a31f9894-903f-4e19-a5a8-6fd888ff013b" - } - }, - { - "cell_type": "code", - "source": [ - "import pandas,sys,os,json,html,getpass,time\n", - "pandas_version = pandas.__version__.split('.')\n", - "pandas_major = int(pandas_version[0])\n", - "pandas_minor = int(pandas_version[1])\n", - "pandas_patch = int(pandas_version[2])\n", - "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\n", - " sys.exit('Please upgrade the Notebook dependency before you can proceed, you can do it by running the \"Reinstall Notebook dependencies\" command in command palette (View menu -> Command Palette…).')\n", - "def run_command(command):\n", - " print(\"Executing: \" + command)\n", - " !{command}\n", - " if _exit_code != 0:\n", - " sys.exit(f'Command execution failed with exit code: {str(_exit_code)}.\\n\\t{command}\\n')\n", - " print(f'Successfully executed: {command}')" - ], - "metadata": { - "azdata_cell_guid": "26fa8bc4-4b8e-4c31-ae11-50484821cea8", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set variables**\n", - "Generated by Azure Data Studio using the values collected in the Deploy Big Data Cluster wizard" - ], - "metadata": { - "azdata_cell_guid": "e70640d0-6059-4cab-939e-e985a978c0da" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Check dependencies**" - ], - "metadata": { - "azdata_cell_guid": "869d0397-a280-4dc4-be76-d652189b5131" - } - }, - { - "cell_type": "code", - "source": [ - "run_command('kubectl version --client=true')\n", - "run_command('azdata --version')" - ], - "metadata": { - "azdata_cell_guid": "c38afb67-1132-495e-9af1-35bf067acbeb", - "tags": [] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Required information**" - ], - "metadata": { - "azdata_cell_guid": "7b383b0d-5687-45b3-a16f-ba3b170c796e" - } - }, - { - "cell_type": "code", - "source": [ - "invoked_by_wizard = \"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\" in os.environ\n", - "if invoked_by_wizard:\n", - " mssql_password = os.environ[\"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\"]\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = os.environ[\"AZDATA_NB_VAR_BDC_AD_DOMAIN_SVC_PASSWORD\"]\n", - "else:\n", - " mssql_password = getpass.getpass(prompt = 'SQL Server 2019 Big Data Cluster 
controller password')\n", - " if mssql_password == \"\":\n", - " sys.exit(f'Password is required.')\n", - " confirm_password = getpass.getpass(prompt = 'Confirm password')\n", - " if mssql_password != confirm_password:\n", - " sys.exit(f'Passwords do not match.')\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = getpass.getpass(prompt = 'Domain service account password')\n", - " if mssql_domain_service_account_password == \"\":\n", - " sys.exit(f'Domain service account password is required.')\n", - "print('You can also use the controller password to access Knox and SQL Server.')" - ], - "metadata": { - "azdata_cell_guid": "b5970f2b-cf13-41af-b0a2-5133d840325e", - "tags": [] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set and show current context**" - ], - "metadata": { - "azdata_cell_guid": "6456bd0c-5b64-4d76-be59-e3a5b32697f5" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl config use-context {mssql_cluster_context}')\n", - "run_command('kubectl config current-context')" - ], - "metadata": { - "azdata_cell_guid": "a38f8b3a-f93a-484c-b9e2-4eba3ed99cc2" - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Make sure the target namespace already exists**" - ], - "metadata": { - "azdata_cell_guid": "3bf1d902-2217-4c99-b2d6-38e45de8e308" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl get namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "6ca9bf71-049a-458e-8000-311d4c15b1ca" - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create deployment configuration files**" - ], - "metadata": { - "azdata_cell_guid": "6d78da36-6af5-4309-baad-bc81bb2cdb7f" - } - }, - { - "cell_type": "code", - "source": [ - "mssql_target_profile = 'ads-bdc-custom-profile'\n", - "if not os.path.exists(mssql_target_profile):\n", - " os.mkdir(mssql_target_profile)\n", - "bdcJsonObj = json.loads(bdc_json)\n", - "controlJsonObj = json.loads(control_json)\n", - "bdcJsonFile = open(f'{mssql_target_profile}/bdc.json', 'w')\n", - "bdcJsonFile.write(json.dumps(bdcJsonObj, indent = 4))\n", - "bdcJsonFile.close()\n", - "controlJsonFile = open(f'{mssql_target_profile}/control.json', 'w')\n", - "controlJsonFile.write(json.dumps(controlJsonObj, indent = 4))\n", - "controlJsonFile.close()\n", - "print(f'Created deployment configuration folder: {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "3110ab23-ecfc-4e36-a1c5-28536b7edebf", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7d56d262-8cd5-49e4-b745-332c6e7a3cb2" - } - }, - { - "cell_type": "code", - "source": [ - "print (f'Creating SQL Server 2019 Big Data Cluster: {mssql_cluster_name} using configuration {mssql_target_profile}')\n", - "os.environ[\"ACCEPT_EULA\"] = 'yes'\n", - "os.environ[\"AZDATA_USERNAME\"] = mssql_username\n", - "os.environ[\"AZDATA_PASSWORD\"] = mssql_password\n", - "if mssql_auth_mode == \"ad\":\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_USERNAME\"] = mssql_domain_service_account_username\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_PASSWORD\"] = mssql_domain_service_account_password\n", - "if os.name == 'nt':\n", - " print(f'If you don\\'t see output produced by azdata, you can run 
the following command in a terminal window to check the deployment status:\\n\\t{os.environ[\"AZDATA_NB_VAR_KUBECTL\"]} get pods -n {mssql_cluster_name} ')\n", - "run_command(f'azdata bdc create -c {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "0a743e88-e7d0-4b41-b8a3-e43985d15f2b", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Login to SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7929fd90-324d-482a-a101-ae29cb183691" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'azdata login --namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "3a49909b-e09e-4e62-a825-c39de2cffc94", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Show SQL Server 2019 Big Data Cluster endpoints**" - ], - "metadata": { - "azdata_cell_guid": "038e801a-a393-4f8d-8e2d-97bc3b740b0c" - } - }, - { - "cell_type": "code", - "source": [ - "from IPython.display import *\n", - "pandas.set_option('display.max_colwidth', -1)\n", - "cmd = f'azdata bdc endpoint list'\n", - "cmdOutput = !{cmd}\n", - "endpoints = json.loads(''.join(cmdOutput))\n", - "endpointsDataFrame = pandas.DataFrame(endpoints)\n", - "endpointsDataFrame.columns = [' '.join(word[0].upper() + word[1:] for word in columnName.split()) for columnName in endpoints[0].keys()]\n", - "display(HTML(endpointsDataFrame.to_html(index=False, render_links=True)))" - ], - "metadata": { - "azdata_cell_guid": "2a8c8d5d-862c-4672-9309-38aa03afc4e6", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Connect to SQL Server Master instance in Azure Data Studio**\n", - "Click the link below to connect to the SQL Server Master instance of the SQL Server 2019 Big Data Cluster." - ], - "metadata": { - "azdata_cell_guid": "0bd809fa-8225-4954-a50c-da57ea167896" - } - }, - { - "cell_type": "code", - "source": [ - "sqlEndpoints = [x for x in endpoints if x['name'] == 'sql-server-master']\n", - "if sqlEndpoints and len(sqlEndpoints) == 1:\n", - " connectionParameter = '{\"serverName\":\"' + sqlEndpoints[0]['endpoint'] + '\",\"providerName\":\"MSSQL\",\"authenticationType\":\"SqlLogin\",\"userName\":' + json.dumps(mssql_username) + ',\"password\":' + json.dumps(mssql_password) + '}'\n", - " display(HTML('
Click here to connect to SQL Server Master instance
'))\n", - " display(HTML('
NOTE: The SQL Server password is included in this link, you may want to clear the results of this code cell before saving the notebook.'))\n", - "else:\n", - " sys.exit('Could not find the SQL Server Master instance endpoint.')" - ], - "metadata": { - "azdata_cell_guid": "d591785d-71aa-4c5d-9cbb-a7da79bca503", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - } - ] -} diff --git a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-kubeadm.ipynb b/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-kubeadm.ipynb deleted file mode 100644 index c18435a68d..0000000000 --- a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-kubeadm.ipynb +++ /dev/null @@ -1,335 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python", - "version": "3.6.6", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - } - }, - "nbformat_minor": 2, - "nbformat": 4, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "![Microsoft](https://raw.githubusercontent.com/microsoft/azuredatastudio/main/extensions/resource-deployment/images/microsoft-small-logo.png)\n", - " \n", - "## Deploy SQL Server 2019 Big Data Cluster on an existing cluster deployed using kubeadm\n", - " \n", - "This notebook walks through the process of deploying a SQL Server 2019 Big Data Cluster on an existing kubeadm cluster.\n", - " \n", - "* Follow the instructions in the **Prerequisites** cell to install the tools if not already installed.\n", - "* The **Required information** will check and prompt you for password if it is not set in the environment variable. 
The password can be used to access the cluster controller, SQL Server, and Knox.\n", - "\n", - "Please press the \"Run all\" button to run the notebook" - ], - "metadata": { - "azdata_cell_guid": "23954d96-3932-4a8e-ab73-da605f99b1a4" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Prerequisites** \n", - "Ensure the following tools are installed and added to PATH before proceeding.\n", - " \n", - "|Tools|Description|Installation|\n", - "|---|---|---|\n", - "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n", - "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |" - ], - "metadata": { - "azdata_cell_guid": "1d7f4c6a-0cb8-4ecc-81c8-544712253a3f" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Setup**" - ], - "metadata": { - "azdata_cell_guid": "a31f9894-903f-4e19-a5a8-6fd888ff013b" - } - }, - { - "cell_type": "code", - "source": [ - "import pandas,sys,os,json,html,getpass,time\n", - "pandas_version = pandas.__version__.split('.')\n", - "pandas_major = int(pandas_version[0])\n", - "pandas_minor = int(pandas_version[1])\n", - "pandas_patch = int(pandas_version[2])\n", - "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\n", - " sys.exit('Please upgrade the Notebook dependency before you can proceed, you can do it by running the \"Reinstall Notebook dependencies\" command in command palette (View menu -> Command Palette…).')\n", - "def run_command(command):\n", - " print(\"Executing: \" + command)\n", - " !{command}\n", - " if _exit_code != 0:\n", - " sys.exit(f'Command execution failed with exit code: {str(_exit_code)}.\\n\\t{command}\\n')\n", - " print(f'Successfully executed: {command}')" - ], - "metadata": { - "azdata_cell_guid": "26fa8bc4-4b8e-4c31-ae11-50484821cea8", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set variables**\n", - "Generated by Azure Data Studio using the values collected in the Deploy Big Data Cluster wizard" - ], - "metadata": { - "azdata_cell_guid": "e70640d0-6059-4cab-939e-e985a978c0da" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Check dependencies**" - ], - "metadata": { - "azdata_cell_guid": "869d0397-a280-4dc4-be76-d652189b5131" - } - }, - { - "cell_type": "code", - "source": [ - "run_command('kubectl version --client=true')\n", - "run_command('azdata --version')" - ], - "metadata": { - "azdata_cell_guid": "c38afb67-1132-495e-9af1-35bf067acbeb", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Required information**" - ], - "metadata": { - "azdata_cell_guid": "7b383b0d-5687-45b3-a16f-ba3b170c796e" - } - }, - { - "cell_type": "code", - "source": [ - "invoked_by_wizard = \"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\" in os.environ\n", - "if invoked_by_wizard:\n", - " mssql_password = os.environ[\"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\"]\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = os.environ[\"AZDATA_NB_VAR_BDC_AD_DOMAIN_SVC_PASSWORD\"]\n", - "else:\n", - " mssql_password = getpass.getpass(prompt = 'SQL Server 2019 Big 
Data Cluster controller password')\n", - " if mssql_password == \"\":\n", - " sys.exit(f'Password is required.')\n", - " confirm_password = getpass.getpass(prompt = 'Confirm password')\n", - " if mssql_password != confirm_password:\n", - " sys.exit(f'Passwords do not match.')\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = getpass.getpass(prompt = 'Domain service account password')\n", - " if mssql_domain_service_account_password == \"\":\n", - " sys.exit(f'Domain service account password is required.')\n", - "print('You can also use the controller password to access Knox and SQL Server.')" - ], - "metadata": { - "azdata_cell_guid": "b5970f2b-cf13-41af-b0a2-5133d840325e", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set and show current context**" - ], - "metadata": { - "azdata_cell_guid": "6456bd0c-5b64-4d76-be59-e3a5b32697f5" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl config use-context {mssql_cluster_context}')\n", - "run_command('kubectl config current-context')" - ], - "metadata": { - "azdata_cell_guid": "a38f8b3a-f93a-484c-b9e2-4eba3ed99cc2" - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create deployment configuration files**" - ], - "metadata": { - "azdata_cell_guid": "6d78da36-6af5-4309-baad-bc81bb2cdb7f" - } - }, - { - "cell_type": "code", - "source": [ - "mssql_target_profile = 'ads-bdc-custom-profile'\n", - "if not os.path.exists(mssql_target_profile):\n", - " os.mkdir(mssql_target_profile)\n", - "bdcJsonObj = json.loads(bdc_json)\n", - "controlJsonObj = json.loads(control_json)\n", - "bdcJsonFile = open(f'{mssql_target_profile}/bdc.json', 'w')\n", - "bdcJsonFile.write(json.dumps(bdcJsonObj, indent = 4))\n", - "bdcJsonFile.close()\n", - "controlJsonFile = open(f'{mssql_target_profile}/control.json', 'w')\n", - "controlJsonFile.write(json.dumps(controlJsonObj, indent = 4))\n", - "controlJsonFile.close()\n", - "print(f'Created deployment configuration folder: {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "3110ab23-ecfc-4e36-a1c5-28536b7edebf", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7d56d262-8cd5-49e4-b745-332c6e7a3cb2" - } - }, - { - "cell_type": "code", - "source": [ - "print (f'Creating SQL Server 2019 Big Data Cluster: {mssql_cluster_name} using configuration {mssql_target_profile}')\n", - "os.environ[\"ACCEPT_EULA\"] = 'yes'\n", - "os.environ[\"AZDATA_USERNAME\"] = mssql_username\n", - "os.environ[\"AZDATA_PASSWORD\"] = mssql_password\n", - "if mssql_auth_mode == \"ad\":\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_USERNAME\"] = mssql_domain_service_account_username\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_PASSWORD\"] = mssql_domain_service_account_password\n", - "if os.name == 'nt':\n", - " print(f'If you don\\'t see output produced by azdata, you can run the following command in a terminal window to check the deployment status:\\n\\t{os.environ[\"AZDATA_NB_VAR_KUBECTL\"]} get pods -n {mssql_cluster_name} ')\n", - "run_command(f'azdata bdc create -c {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "0a743e88-e7d0-4b41-b8a3-e43985d15f2b", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - 
"cell_type": "markdown", - "source": [ - "### **Login to SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7929fd90-324d-482a-a101-ae29cb183691" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'azdata login --namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "3a49909b-e09e-4e62-a825-c39de2cffc94", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Show SQL Server 2019 Big Data Cluster endpoints**" - ], - "metadata": { - "azdata_cell_guid": "038e801a-a393-4f8d-8e2d-97bc3b740b0c" - } - }, - { - "cell_type": "code", - "source": [ - "from IPython.display import *\n", - "pandas.set_option('display.max_colwidth', -1)\n", - "cmd = f'azdata bdc endpoint list'\n", - "cmdOutput = !{cmd}\n", - "endpoints = json.loads(''.join(cmdOutput))\n", - "endpointsDataFrame = pandas.DataFrame(endpoints)\n", - "endpointsDataFrame.columns = [' '.join(word[0].upper() + word[1:] for word in columnName.split()) for columnName in endpoints[0].keys()]\n", - "display(HTML(endpointsDataFrame.to_html(index=False, render_links=True)))" - ], - "metadata": { - "azdata_cell_guid": "2a8c8d5d-862c-4672-9309-38aa03afc4e6", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Connect to SQL Server Master instance in Azure Data Studio**\n", - "Click the link below to connect to the SQL Server Master instance of the SQL Server 2019 Big Data Cluster." - ], - "metadata": { - "azdata_cell_guid": "0bd809fa-8225-4954-a50c-da57ea167896" - } - }, - { - "cell_type": "code", - "source": [ - "sqlEndpoints = [x for x in endpoints if x['name'] == 'sql-server-master']\n", - "if sqlEndpoints and len(sqlEndpoints) == 1:\n", - " connectionParameter = '{\"serverName\":\"' + sqlEndpoints[0]['endpoint'] + '\",\"providerName\":\"MSSQL\",\"authenticationType\":\"SqlLogin\",\"userName\":' + json.dumps(mssql_username) + ',\"password\":' + json.dumps(mssql_password) + '}'\n", - " display(HTML('
<a href=\"command:azdata.connect?' + html.escape(connectionParameter) + '\">Click here to connect to SQL Server Master instance</a>
'))\n", - " display(HTML('
NOTE: The SQL Server password is included in this link, you may want to clear the results of this code cell before saving the notebook.'))\n", - "else:\n", - " sys.exit('Could not find the SQL Server Master instance endpoint.')" - ], - "metadata": { - "azdata_cell_guid": "d591785d-71aa-4c5d-9cbb-a7da79bca503", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - } - ] -} diff --git a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-openshift.ipynb b/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-openshift.ipynb deleted file mode 100644 index 59893d4796..0000000000 --- a/extensions/big-data-cluster/notebooks/deployment/2019/deploy-bdc-existing-openshift.ipynb +++ /dev/null @@ -1,353 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python", - "version": "3.6.6", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - } - }, - "nbformat_minor": 2, - "nbformat": 4, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "![Microsoft](https://raw.githubusercontent.com/microsoft/azuredatastudio/main/extensions/resource-deployment/images/microsoft-small-logo.png)\n", - " \n", - "## Deploy SQL Server 2019 Big Data Cluster on an existing OpenShift cluster\n", - " \n", - "This notebook walks through the process of deploying a SQL Server 2019 Big Data Cluster on an existing OpenShift cluster.\n", - " \n", - "* Follow the instructions in the **Prerequisites** cell to install the tools if not already installed.\n", - "* The **Required information** will check and prompt you for password if it is not set in the environment variable. 
The password can be used to access the cluster controller, SQL Server, and Knox.\n", - "\n", - "Please press the \"Run all\" button to run the notebook" - ], - "metadata": { - "azdata_cell_guid": "23954d96-3932-4a8e-ab73-da605f99b1a4" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Prerequisites** \n", - "Ensure the following tools are installed and added to PATH before proceeding.\n", - " \n", - "|Tools|Description|Installation|\n", - "|---|---|---|\n", - "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n", - "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |" - ], - "metadata": { - "azdata_cell_guid": "1d7f4c6a-0cb8-4ecc-81c8-544712253a3f" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Setup**" - ], - "metadata": { - "azdata_cell_guid": "a31f9894-903f-4e19-a5a8-6fd888ff013b" - } - }, - { - "cell_type": "code", - "source": [ - "import pandas,sys,os,json,html,getpass,time\n", - "pandas_version = pandas.__version__.split('.')\n", - "pandas_major = int(pandas_version[0])\n", - "pandas_minor = int(pandas_version[1])\n", - "pandas_patch = int(pandas_version[2])\n", - "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\n", - " sys.exit('Please upgrade the Notebook dependency before you can proceed, you can do it by running the \"Reinstall Notebook dependencies\" command in command palette (View menu -> Command Palette…).')\n", - "def run_command(command):\n", - " print(\"Executing: \" + command)\n", - " !{command}\n", - " if _exit_code != 0:\n", - " sys.exit(f'Command execution failed with exit code: {str(_exit_code)}.\\n\\t{command}\\n')\n", - " print(f'Successfully executed: {command}')" - ], - "metadata": { - "azdata_cell_guid": "26fa8bc4-4b8e-4c31-ae11-50484821cea8", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set variables**\n", - "Generated by Azure Data Studio using the values collected in the Deploy Big Data Cluster wizard" - ], - "metadata": { - "azdata_cell_guid": "e70640d0-6059-4cab-939e-e985a978c0da" - } - }, - { - "cell_type": "markdown", - "source": [ - "### **Check dependencies**" - ], - "metadata": { - "azdata_cell_guid": "869d0397-a280-4dc4-be76-d652189b5131" - } - }, - { - "cell_type": "code", - "source": [ - "run_command('kubectl version --client=true')\n", - "run_command('azdata --version')" - ], - "metadata": { - "azdata_cell_guid": "c38afb67-1132-495e-9af1-35bf067acbeb", - "tags": [] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Required information**" - ], - "metadata": { - "azdata_cell_guid": "7b383b0d-5687-45b3-a16f-ba3b170c796e" - } - }, - { - "cell_type": "code", - "source": [ - "invoked_by_wizard = \"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\" in os.environ\n", - "if invoked_by_wizard:\n", - " mssql_password = os.environ[\"AZDATA_NB_VAR_BDC_ADMIN_PASSWORD\"]\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = os.environ[\"AZDATA_NB_VAR_BDC_AD_DOMAIN_SVC_PASSWORD\"]\n", - "else:\n", - " mssql_password = getpass.getpass(prompt = 'SQL Server 2019 Big Data Cluster 
controller password')\n", - " if mssql_password == \"\":\n", - " sys.exit(f'Password is required.')\n", - " confirm_password = getpass.getpass(prompt = 'Confirm password')\n", - " if mssql_password != confirm_password:\n", - " sys.exit(f'Passwords do not match.')\n", - " if mssql_auth_mode == \"ad\":\n", - " mssql_domain_service_account_password = getpass.getpass(prompt = 'Domain service account password')\n", - " if mssql_domain_service_account_password == \"\":\n", - " sys.exit(f'Domain service account password is required.')\n", - "print('You can also use the controller password to access Knox and SQL Server.')" - ], - "metadata": { - "azdata_cell_guid": "b5970f2b-cf13-41af-b0a2-5133d840325e", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Set and show current context**" - ], - "metadata": { - "azdata_cell_guid": "6456bd0c-5b64-4d76-be59-e3a5b32697f5" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl config use-context {mssql_cluster_context}')\n", - "run_command('kubectl config current-context')" - ], - "metadata": { - "azdata_cell_guid": "a38f8b3a-f93a-484c-b9e2-4eba3ed99cc2" - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Make sure the target namespace already exists**" - ], - "metadata": { - "azdata_cell_guid": "b903f09b-0eeb-45c0-8173-1741cce3790c" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'kubectl get namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "174c02ea-8876-43be-bd93-3a39223e25ec" - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create deployment configuration files**" - ], - "metadata": { - "azdata_cell_guid": "6d78da36-6af5-4309-baad-bc81bb2cdb7f" - } - }, - { - "cell_type": "code", - "source": [ - "mssql_target_profile = 'ads-bdc-custom-profile'\n", - "if not os.path.exists(mssql_target_profile):\n", - " os.mkdir(mssql_target_profile)\n", - "bdcJsonObj = json.loads(bdc_json)\n", - "controlJsonObj = json.loads(control_json)\n", - "bdcJsonFile = open(f'{mssql_target_profile}/bdc.json', 'w')\n", - "bdcJsonFile.write(json.dumps(bdcJsonObj, indent = 4))\n", - "bdcJsonFile.close()\n", - "controlJsonFile = open(f'{mssql_target_profile}/control.json', 'w')\n", - "controlJsonFile.write(json.dumps(controlJsonObj, indent = 4))\n", - "controlJsonFile.close()\n", - "print(f'Created deployment configuration folder: {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "3110ab23-ecfc-4e36-a1c5-28536b7edebf", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Create SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7d56d262-8cd5-49e4-b745-332c6e7a3cb2" - } - }, - { - "cell_type": "code", - "source": [ - "print (f'Creating SQL Server 2019 Big Data Cluster: {mssql_cluster_name} using configuration {mssql_target_profile}')\n", - "os.environ[\"ACCEPT_EULA\"] = 'yes'\n", - "os.environ[\"AZDATA_USERNAME\"] = mssql_username\n", - "os.environ[\"AZDATA_PASSWORD\"] = mssql_password\n", - "if mssql_auth_mode == \"ad\":\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_USERNAME\"] = mssql_domain_service_account_username\n", - " os.environ[\"DOMAIN_SERVICE_ACCOUNT_PASSWORD\"] = mssql_domain_service_account_password\n", - "if os.name == 'nt':\n", - " print(f'If you don\\'t see output produced by 
azdata, you can run the following command in a terminal window to check the deployment status:\\n\\t{os.environ[\"AZDATA_NB_VAR_KUBECTL\"]} get pods -n {mssql_cluster_name} ')\n", - "run_command(f'azdata bdc create -c {mssql_target_profile}')" - ], - "metadata": { - "azdata_cell_guid": "0a743e88-e7d0-4b41-b8a3-e43985d15f2b", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Login to SQL Server 2019 Big Data Cluster**" - ], - "metadata": { - "azdata_cell_guid": "7929fd90-324d-482a-a101-ae29cb183691" - } - }, - { - "cell_type": "code", - "source": [ - "run_command(f'azdata login --namespace {mssql_cluster_name}')" - ], - "metadata": { - "azdata_cell_guid": "3a49909b-e09e-4e62-a825-c39de2cffc94", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Show SQL Server 2019 Big Data Cluster endpoints**" - ], - "metadata": { - "azdata_cell_guid": "038e801a-a393-4f8d-8e2d-97bc3b740b0c" - } - }, - { - "cell_type": "code", - "source": [ - "from IPython.display import *\n", - "pandas.set_option('display.max_colwidth', -1)\n", - "cmd = f'azdata bdc endpoint list'\n", - "cmdOutput = !{cmd}\n", - "endpoints = json.loads(''.join(cmdOutput))\n", - "endpointsDataFrame = pandas.DataFrame(endpoints)\n", - "endpointsDataFrame.columns = [' '.join(word[0].upper() + word[1:] for word in columnName.split()) for columnName in endpoints[0].keys()]\n", - "display(HTML(endpointsDataFrame.to_html(index=False, render_links=True)))" - ], - "metadata": { - "azdata_cell_guid": "2a8c8d5d-862c-4672-9309-38aa03afc4e6", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "source": [ - "### **Connect to SQL Server Master instance in Azure Data Studio**\n", - "Click the link below to connect to the SQL Server Master instance of the SQL Server 2019 Big Data Cluster." - ], - "metadata": { - "azdata_cell_guid": "0bd809fa-8225-4954-a50c-da57ea167896" - } - }, - { - "cell_type": "code", - "source": [ - "sqlEndpoints = [x for x in endpoints if x['name'] == 'sql-server-master']\n", - "if sqlEndpoints and len(sqlEndpoints) == 1:\n", - " connectionParameter = '{\"serverName\":\"' + sqlEndpoints[0]['endpoint'] + '\",\"providerName\":\"MSSQL\",\"authenticationType\":\"SqlLogin\",\"userName\":' + json.dumps(mssql_username) + ',\"password\":' + json.dumps(mssql_password) + '}'\n", - " display(HTML('
<a href=\"command:azdata.connect?' + html.escape(connectionParameter) + '\">Click here to connect to SQL Server Master instance</a>
'))\n", - " display(HTML('
NOTE: The SQL Server password is included in this link, you may want to clear the results of this code cell before saving the notebook.'))\n", - "else:\n", - " sys.exit('Could not find the SQL Server Master instance endpoint.')" - ], - "metadata": { - "azdata_cell_guid": "d591785d-71aa-4c5d-9cbb-a7da79bca503", - "tags": [ - "hide_input" - ] - }, - "outputs": [], - "execution_count": null - } - ] -} diff --git a/extensions/big-data-cluster/package.json b/extensions/big-data-cluster/package.json deleted file mode 100644 index cd39a7ac8d..0000000000 --- a/extensions/big-data-cluster/package.json +++ /dev/null @@ -1,369 +0,0 @@ -{ - "name": "big-data-cluster", - "displayName": "%text.sqlServerBigDataClusters%", - "description": "%description%", - "version": "1.0.0", - "publisher": "Microsoft", - "preview": true, - "license": "https://raw.githubusercontent.com/Microsoft/azuredatastudio/main/LICENSE.txt", - "icon": "images/extension.png", - "engines": { - "vscode": "*", - "azdata": "*" - }, - "activationEvents": [ - "onCommand:azdata.resource.deploy", - "onCommand:bigDataClusters.command.mount", - "onCommand:bigDataClusters.command.refreshmount", - "onCommand:bigDataClusters.command.deletemount", - "onCommand:bigDataClusters.command.createController", - "onCommand:bigDataClusters.command.connectController", - "onCommand:bigDataClusters.command.removeController", - "onCommand:bigDataClusters.command.manageController", - "onCommand:bigDataClusters.command.refreshController", - "onView:sqlBigDataCluster" - ], - "repository": { - "type": "git", - "url": "https://github.com/Microsoft/azuredatastudio.git" - }, - "capabilities": { - "virtualWorkspaces": false, - "untrustedWorkspaces": { - "supported": true - } - }, - "main": "./out/extension", - "contributes": { - "dataExplorer": { - "sqlBigDataCluster": [ - { - "id": "sqlBigDataCluster", - "name": "%text.sqlServerBigDataClusters%" - } - ] - }, - "menus": { - "commandPalette": [ - { - "command": "bigDataClusters.command.createController", - "when": "false" - }, - { - "command": "bigDataClusters.command.connectController", - "when": "false" - }, - { - "command": "bigDataClusters.command.removeController", - "when": "false" - }, - { - "command": "bigDataClusters.command.refreshController", - "when": "false" - }, - { - "command": "bigDataClusters.command.manageController", - "when": "false" - }, - { - "command": "bigDataClusters.command.mount", - "when": "false" - }, - { - "command": "bigDataClusters.command.refreshmount", - "when": "false" - }, - { - "command": "bigDataClusters.command.deletemount", - "when": "false" - } - ], - "view/title": [ - { - "command": "bigDataClusters.command.createController", - "when": "view == sqlBigDataCluster", - "group": "navigation@1" - }, - { - "command": "bigDataClusters.command.connectController", - "when": "view == sqlBigDataCluster", - "group": "navigation@2" - } - ], - "view/item/context": [ - { - "command": "bigDataClusters.command.manageController", - "when": "view == sqlBigDataCluster && viewItem == bigDataClusters.itemType.controllerNode", - "group": "navigation@1" - }, - { - "command": "bigDataClusters.command.refreshController", - "when": "view == sqlBigDataCluster && viewItem == bigDataClusters.itemType.controllerNode", - "group": "navigation@2" - }, - { - "command": "bigDataClusters.command.removeController", - "when": "view == sqlBigDataCluster && viewItem == bigDataClusters.itemType.controllerNode", - "group": "navigation@3" - } - ], - "objectExplorer/item/context": [ - { - "command": 
"bigDataClusters.command.mount", - "when": "nodeType=~/^mssqlCluster/ && nodeType!=mssqlCluster:message && nodeSubType=~/^(?!:mount).*$/", - "group": "1mssqlCluster@10" - }, - { - "command": "bigDataClusters.command.refreshmount", - "when": "nodeType == mssqlCluster:folder && nodeSubType==:mount:", - "group": "1mssqlCluster@11" - }, - { - "command": "bigDataClusters.command.deletemount", - "when": "nodeType == mssqlCluster:folder && nodeSubType==:mount:", - "group": "1mssqlCluster@12" - } - ] - }, - "commands": [ - { - "command": "bigDataClusters.command.createController", - "title": "%command.createController.title%", - "icon": "$(add)" - }, - { - "command": "bigDataClusters.command.connectController", - "title": "%command.connectController.title%", - "icon": "$(disconnect)" - }, - { - "command": "bigDataClusters.command.removeController", - "title": "%command.removeController.title%", - "when": "viewItem == bigDataClusters.itemType.controllerNode" - }, - { - "command": "bigDataClusters.command.refreshController", - "title": "%command.refreshController.title%", - "icon": "$(refresh)" - }, - { - "command": "bigDataClusters.command.manageController", - "title": "%command.manageController.title%" - }, - { - "command": "bigDataClusters.command.mount", - "title": "%command.mount.title%" - }, - { - "command": "bigDataClusters.command.refreshmount", - "title": "%command.refreshmount.title%" - }, - { - "command": "bigDataClusters.command.deletemount", - "title": "%command.deletemount.title%" - } - ], - "configuration": { - "type": "object", - "title": "%bdc.configuration.title%", - "properties": { - "bigDataCluster.ignoreSslVerification": { - "type": "boolean", - "default": true, - "description": "%bdc.ignoreSslVerification.desc%" - } - } - }, - "viewsWelcome": [ - { - "view": "sqlBigDataCluster", - "contents": "%bdc.view.welcome.connect%", - "when": "bdc.loaded" - }, - { - "view": "sqlBigDataCluster", - "contents": "%bdc.view.welcome.loading%", - "when": "!bdc.loaded" - } - ], - "resourceDeploymentTypes": [ - { - "name": "sql-bdc", - "displayIndex": 3, - "displayName": "%resource-type-sql-bdc-display-name%", - "description": "%resource-type-sql-bdc-description%", - "platforms": "*", - "icon": "./images/sql_bdc.svg", - "tags": [ - "On-premises", - "SQL Server", - "Cloud" - ], - "options": [ - { - "name": "version", - "displayName": "%version-display-name%", - "values": [ - { - "name": "bdc2019", - "displayName": "%bdc-2019-display-name%" - } - ] - }, - { - "name": "target", - "displayName": "%bdc-deployment-target%", - "values": [ - { - "name": "new-aks", - "displayName": "%bdc-deployment-target-new-aks%" - }, - { - "name": "existing-aks", - "displayName": "%bdc-deployment-target-existing-aks%" - }, - { - "name": "existing-kubeadm", - "displayName": "%bdc-deployment-target-existing-kubeadm%" - }, - { - "name": "existing-aro", - "displayName": "%bdc-deployment-target-existing-aro%" - }, - { - "name": "existing-openshift", - "displayName": "%bdc-deployment-target-existing-openshift%" - } - ] - } - ], - "providers": [ - { - "name": "sql-bdc_new-aks_bdc2019", - "bdcWizard": { - "type": "new-aks", - "notebook": "./notebooks/deployment/2019/deploy-bdc-aks.ipynb" - }, - "requiredTools": [ - { - "name": "kubectl", - "version": "1.13.0" - }, - { - "name": "azure-cli" - }, - { - "name": "azdata", - "version": "20.3.9" - } - ], - "when": "target=new-aks&&version=bdc2019" - }, - { - "name": "sql-bdc_existing-aks_bdc2019", - "bdcWizard": { - "type": "existing-aks", - "notebook": 
"./notebooks/deployment/2019/deploy-bdc-existing-aks.ipynb" - }, - "requiredTools": [ - { - "name": "kubectl", - "version": "1.13.0" - }, - { - "name": "azdata", - "version": "20.3.9" - } - ], - "when": "target=existing-aks&&version=bdc2019" - }, - { - "name": "sql-bdc_existing-kubeadm_bdc2019", - "bdcWizard": { - "type": "existing-kubeadm", - "notebook": "./notebooks/deployment/2019/deploy-bdc-existing-kubeadm.ipynb" - }, - "requiredTools": [ - { - "name": "kubectl", - "version": "1.13.0" - }, - { - "name": "azdata", - "version": "20.3.9" - } - ], - "when": "target=existing-kubeadm&&version=bdc2019" - }, - { - "name": "sql-bdc_existing-aro_bdc2019", - "bdcWizard": { - "type": "existing-aro", - "notebook": "./notebooks/deployment/2019/deploy-bdc-existing-aro.ipynb" - }, - "requiredTools": [ - { - "name": "kubectl", - "version": "1.13.0" - }, - { - "name": "azdata", - "version": "20.3.9" - } - ], - "when": "target=existing-aro&&version=bdc2019" - }, - { - "name": "sql-bdc_existing-openshift_bdc2019", - "bdcWizard": { - "type": "existing-openshift", - "notebook": "./notebooks/deployment/2019/deploy-bdc-existing-openshift.ipynb" - }, - "requiredTools": [ - { - "name": "kubectl", - "version": "1.13.0" - }, - { - "name": "azdata", - "version": "20.3.9" - } - ], - "when": "target=existing-openshift&&version=bdc2019" - } - ], - "agreements": [ - { - "template": "%bdc-agreement%", - "links": [ - { - "text": "%microsoft-privacy-statement%", - "url": "https://go.microsoft.com/fwlink/?LinkId=853010" - }, - { - "text": "%bdc-agreement-bdc-eula%", - "url": "https://go.microsoft.com/fwlink/?LinkId=2002534" - }, - { - "text": "%bdc-agreement-azdata-eula%", - "url": "https://aka.ms/eula-azdata-en" - } - ], - "when": "true" - } - ] - } - ] - }, - "dependencies": { - "@microsoft/ads-kerberos": "^1.1.3", - "request": "^2.88.0", - "vscode-nls": "^4.0.0" - }, - "devDependencies": { - "@types/request": "^2.48.3" - }, - "resolutions": { - "json-schema": "0.4.0" - } -} diff --git a/extensions/big-data-cluster/package.nls.json b/extensions/big-data-cluster/package.nls.json deleted file mode 100644 index aee045d3ed..0000000000 --- a/extensions/big-data-cluster/package.nls.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "description": "Support for managing SQL Server Big Data Clusters", - "text.sqlServerBigDataClusters": "SQL Server Big Data Clusters", - "command.connectController.title": "Connect to Existing Controller", - "command.createController.title": "Create New Controller", - "command.removeController.title": "Remove Controller", - "command.refreshController.title": "Refresh", - "command.manageController.title": "Manage", - "command.mount.title": "Mount HDFS", - "command.refreshmount.title": "Refresh Mount", - "command.deletemount.title": "Delete Mount", - "bdc.configuration.title": "Big Data Cluster", - "bdc.view.welcome.connect": "No SQL Big Data Cluster controllers registered. 
[Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview)\n[Connect Controller](command:bigDataClusters.command.connectController)", - "bdc.view.welcome.loading": "Loading controllers...", - "bdc.ignoreSslVerification.desc": "Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true", - "resource-type-sql-bdc-display-name": "SQL Server Big Data Cluster", - "resource-type-sql-bdc-description": "SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes", - "version-display-name": "Version", - "bdc-2019-display-name": "SQL Server 2019", - "bdc-deployment-target": "Deployment target", - "bdc-deployment-target-new-aks": "New Azure Kubernetes Service Cluster", - "bdc-deployment-target-existing-aks": "Existing Azure Kubernetes Service Cluster", - "bdc-deployment-target-existing-kubeadm": "Existing Kubernetes Cluster (kubeadm)", - "bdc-deployment-target-existing-aro": "Existing Azure Red Hat OpenShift cluster", - "bdc-deployment-target-existing-openshift": "Existing OpenShift cluster", - "bdc-cluster-settings-section-title": "SQL Server Big Data Cluster settings", - "bdc-cluster-name-field": "Cluster name", - "bdc-controller-username-field": "Controller username", - "bdc-password-field": "Password", - "bdc-confirm-password-field": "Confirm password", - "bdc-azure-settings-section-title": "Azure settings", - "bdc-azure-subscription-id-field": "Subscription id", - "bdc-azure-subscription-id-placeholder": "Use my default Azure subscription", - "bdc-azure-resource-group-field": "Resource group name", - "bdc-azure-region-field": "Region", - "bdc-azure-aks-name-field": "AKS cluster name", - "bdc-azure-vm-size-field": "VM size", - "bdc-azure-vm-count-field": "VM count", - "bdc-storage-class-field": "Storage class name", - "bdc-data-size-field": "Capacity for data (GB)", - "bdc-log-size-field": "Capacity for logs (GB)", - "bdc-agreement": "I accept {0}, {1} and {2}.", - "microsoft-privacy-statement": "Microsoft Privacy Statement", - "bdc-agreement-azdata-eula": "azdata License Terms", - "bdc-agreement-bdc-eula": "SQL Server License Terms" -} diff --git a/extensions/big-data-cluster/resources/dark/bigDataCluster_controller.svg b/extensions/big-data-cluster/resources/dark/bigDataCluster_controller.svg deleted file mode 100644 index 428b8af38c..0000000000 --- a/extensions/big-data-cluster/resources/dark/bigDataCluster_controller.svg +++ /dev/null @@ -1 +0,0 @@ -centralmanagement_server_16x \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/dark/copy_inverse.svg b/extensions/big-data-cluster/resources/dark/copy_inverse.svg deleted file mode 100644 index fa65571127..0000000000 --- a/extensions/big-data-cluster/resources/dark/copy_inverse.svg +++ /dev/null @@ -1 +0,0 @@ -copy_inverse \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/dark/notebook_inverse.svg b/extensions/big-data-cluster/resources/dark/notebook_inverse.svg deleted file mode 100644 index 841199cf11..0000000000 --- a/extensions/big-data-cluster/resources/dark/notebook_inverse.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/extensions/big-data-cluster/resources/dark/refresh_inverse.svg b/extensions/big-data-cluster/resources/dark/refresh_inverse.svg deleted file mode 100644 index d79fdaa4e8..0000000000 --- a/extensions/big-data-cluster/resources/dark/refresh_inverse.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of 
file diff --git a/extensions/big-data-cluster/resources/dark/status_ok_dark.svg b/extensions/big-data-cluster/resources/dark/status_ok_dark.svg deleted file mode 100644 index 776e1fd909..0000000000 --- a/extensions/big-data-cluster/resources/dark/status_ok_dark.svg +++ /dev/null @@ -1 +0,0 @@ -success_16x16 \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/dark/status_warning_dark.svg b/extensions/big-data-cluster/resources/dark/status_warning_dark.svg deleted file mode 100644 index a267963e58..0000000000 --- a/extensions/big-data-cluster/resources/dark/status_warning_dark.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/extensions/big-data-cluster/resources/light/bigDataCluster_controller.svg b/extensions/big-data-cluster/resources/light/bigDataCluster_controller.svg deleted file mode 100644 index 428b8af38c..0000000000 --- a/extensions/big-data-cluster/resources/light/bigDataCluster_controller.svg +++ /dev/null @@ -1 +0,0 @@ -centralmanagement_server_16x \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/light/copy.svg b/extensions/big-data-cluster/resources/light/copy.svg deleted file mode 100644 index 91692de258..0000000000 --- a/extensions/big-data-cluster/resources/light/copy.svg +++ /dev/null @@ -1 +0,0 @@ -copy \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/light/notebook.svg b/extensions/big-data-cluster/resources/light/notebook.svg deleted file mode 100644 index 2711d10b2a..0000000000 --- a/extensions/big-data-cluster/resources/light/notebook.svg +++ /dev/null @@ -1,7 +0,0 @@ - - Artboard 20 - - - - - diff --git a/extensions/big-data-cluster/resources/light/refresh.svg b/extensions/big-data-cluster/resources/light/refresh.svg deleted file mode 100644 index e034574819..0000000000 --- a/extensions/big-data-cluster/resources/light/refresh.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/light/status_ok_light.svg b/extensions/big-data-cluster/resources/light/status_ok_light.svg deleted file mode 100644 index 776e1fd909..0000000000 --- a/extensions/big-data-cluster/resources/light/status_ok_light.svg +++ /dev/null @@ -1 +0,0 @@ -success_16x16 \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/light/status_warning_light.svg b/extensions/big-data-cluster/resources/light/status_warning_light.svg deleted file mode 100644 index f2e2aa741e..0000000000 --- a/extensions/big-data-cluster/resources/light/status_warning_light.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/extensions/big-data-cluster/resources/status_circle_blank.svg b/extensions/big-data-cluster/resources/status_circle_blank.svg deleted file mode 100644 index af5badbac8..0000000000 --- a/extensions/big-data-cluster/resources/status_circle_blank.svg +++ /dev/null @@ -1,2 +0,0 @@ - - \ No newline at end of file diff --git a/extensions/big-data-cluster/resources/status_circle_red.svg b/extensions/big-data-cluster/resources/status_circle_red.svg deleted file mode 100644 index f73edde71c..0000000000 --- a/extensions/big-data-cluster/resources/status_circle_red.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/extensions/big-data-cluster/src/bdc.d.ts b/extensions/big-data-cluster/src/bdc.d.ts deleted file mode 100644 index c1420a3bd2..0000000000 --- a/extensions/big-data-cluster/src/bdc.d.ts +++ /dev/null @@ -1,44 +0,0 @@ 
-/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -declare module 'bdc' { - - export const enum constants { - extensionName = 'Microsoft.big-data-cluster' - } - - export interface IExtension { - getClusterController(url: string, authType: AuthType, username?: string, password?: string): IClusterController; - } - - export interface IEndpointModel { - name?: string; - description?: string; - endpoint?: string; - protocol?: string; - } - - export interface IHttpResponse { - method?: string; - url?: string; - statusCode?: number; - statusMessage?: string; - } - - export interface IEndPointsResponse { - response: IHttpResponse; - endPoints: IEndpointModel[]; - } - - export type AuthType = 'integrated' | 'basic'; - - export interface IClusterController { - getClusterConfig(): Promise; - getKnoxUsername(defaultUsername: string): Promise; - getEndPoints(promptConnect?: boolean): Promise - username: string; - password: string; - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/auth.ts b/extensions/big-data-cluster/src/bigDataCluster/auth.ts deleted file mode 100644 index f35c282ba9..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/auth.ts +++ /dev/null @@ -1,34 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as kerberos from '@microsoft/ads-kerberos'; -import * as vscode from 'vscode'; - -export async function authenticateKerberos(hostname: string): Promise { - const service = 'HTTP' + (process.platform === 'win32' ? '/' : '@') + hostname; - const mechOID = kerberos.GSS_MECH_OID_KRB5; - let client = await kerberos.initializeClient(service, { mechOID }); - let response = await client.step(''); - return response; -} - - -type HostAndIp = { host: string, port: string }; - -export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp { - let authority = vscode.Uri.parse(endpoint).authority; - let hostAndPortRegex = /^(.*)([,:](\d+))/g; - let match = hostAndPortRegex.exec(authority); - if (match) { - return { - host: match[1], - port: match[3] - }; - } - return { - host: authority, - port: undefined - }; -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/constants.ts b/extensions/big-data-cluster/src/bigDataCluster/constants.ts deleted file mode 100644 index 26f9de0804..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/constants.ts +++ /dev/null @@ -1,77 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; - -export enum BdcItemType { - controllerRoot = 'bigDataClusters.itemType.controllerRootNode', - controller = 'bigDataClusters.itemType.controllerNode', - loadingController = 'bigDataClusters.itemType.loadingControllerNode' -} - -export interface IconPath { - dark: string; - light: string; -} - -export class IconPathHelper { - private static extensionContext: vscode.ExtensionContext; - - public static controllerNode: IconPath; - public static copy: IconPath; - public static refresh: IconPath; - public static status_ok: IconPath; - public static status_warning: IconPath; - public static notebook: IconPath; - public static status_circle_red: IconPath; - public static status_circle_blank: IconPath; - - public static setExtensionContext(extensionContext: vscode.ExtensionContext) { - IconPathHelper.extensionContext = extensionContext; - IconPathHelper.controllerNode = { - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/bigDataCluster_controller.svg'), - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/bigDataCluster_controller.svg') - }; - IconPathHelper.copy = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/copy.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/copy_inverse.svg') - }; - IconPathHelper.refresh = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/refresh.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/refresh_inverse.svg') - }; - IconPathHelper.status_ok = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/status_ok_light.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/status_ok_dark.svg') - }; - IconPathHelper.status_warning = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/status_warning_light.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/status_warning_dark.svg') - }; - IconPathHelper.notebook = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/light/notebook.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/dark/notebook_inverse.svg') - }; - IconPathHelper.status_circle_red = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/status_circle_red.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/status_circle_red.svg') - }; - IconPathHelper.status_circle_blank = { - light: IconPathHelper.extensionContext.asAbsolutePath('resources/status_circle_blank.svg'), - dark: IconPathHelper.extensionContext.asAbsolutePath('resources/status_circle_blank.svg') - }; - } -} - -export namespace cssStyles { - export const title = { 'font-size': '14px', 'font-weight': '600' }; - export const tableHeader = { 'text-align': 'left', 'font-weight': 'bold', 'text-transform': 'uppercase', 'font-size': '10px', 'user-select': 'text' }; - export const text = { 'margin-block-start': '0px', 'margin-block-end': '0px' }; - export const lastUpdatedText = { ...text, 'color': '#595959' }; - export const errorText = { ...text, 'color': 'red' }; -} - -export const clusterEndpointsProperty = 'clusterEndpoints'; -export const controllerEndpointName = 'controller'; diff --git a/extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts b/extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts deleted file mode 
100644 index f2ce5d540e..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/controller/apiGenerated.ts +++ /dev/null @@ -1,1821 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -/* tslint:disable: no-unexternalized-strings */ -/* tslint:disable: semicolon */ -/* tslint:disable: triple-equals */ -/* tslint:disable: no-redundant-jsdoc */ - -import localVarRequest = require('request'); -import http = require('http'); -import * as bdc from 'bdc'; - -let defaultBasePath = 'https://localhost'; - -// =============================================== -// This file is autogenerated - Please do not edit -// =============================================== - -/* tslint:disable:no-unused-variable */ -let primitives = [ - "string", - "boolean", - "double", - "integer", - "long", - "float", - "number", - "any" - ]; - -class ObjectSerializer { - - public static findCorrectType(data: any, expectedType: string) { - if (data == undefined) { - return expectedType; - } else if (primitives.indexOf(expectedType.toLowerCase()) !== -1) { - return expectedType; - } else if (expectedType === "Date") { - return expectedType; - } else { - if (enumsMap[expectedType]) { - return expectedType; - } - - if (!typeMap[expectedType]) { - return expectedType; // w/e we don't know the type - } - - // Check the discriminator - let discriminatorProperty = typeMap[expectedType].discriminator; - if (discriminatorProperty == null) { - return expectedType; // the type does not have a discriminator. use it. - } else { - if (data[discriminatorProperty]) { - return data[discriminatorProperty]; // use the type given in the discriminator - } else { - return expectedType; // discriminator was not present (or an empty string) - } - } - } - } - - public static serialize(data: any, type: string) { - if (data == undefined) { - return data; - } else if (primitives.indexOf(type.toLowerCase()) !== -1) { - return data; - } else if (type.lastIndexOf("Array<", 0) === 0) { // string.startsWith pre es6 - let subType: string = type.replace("Array<", ""); // Array => Type> - subType = subType.substring(0, subType.length - 1); // Type> => Type - let transformedData: any[] = []; - for (let index in data) { - let date = data[index]; - transformedData.push(ObjectSerializer.serialize(date, subType)); - } - return transformedData; - } else if (type === "Date") { - return data.toString(); - } else { - if (enumsMap[type]) { - return data; - } - if (!typeMap[type]) { // in case we dont know the type - return data; - } - - // get the map for the correct type. - let attributeTypes = typeMap[type].getAttributeTypeMap(); - let instance: {[index: string]: any} = {}; - for (let index in attributeTypes) { - let attributeType = attributeTypes[index]; - instance[attributeType.baseName] = ObjectSerializer.serialize(data[attributeType.name], attributeType.type); - } - return instance; - } - } - - public static deserialize(data: any, type: string) { - // polymorphism may change the actual type. 
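- // findCorrectType (above) consults the model's discriminator, when one is declared in typeMap, so polymorphic payloads deserialize into the correct generated class.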
- type = ObjectSerializer.findCorrectType(data, type); - if (data == undefined) { - return data; - } else if (primitives.indexOf(type.toLowerCase()) !== -1) { - return data; - } else if (type.lastIndexOf("Array<", 0) === 0) { // string.startsWith pre es6 - let subType: string = type.replace("Array<", ""); // Array => Type> - subType = subType.substring(0, subType.length - 1); // Type> => Type - let transformedData: any[] = []; - for (let index in data) { - let date = data[index]; - transformedData.push(ObjectSerializer.deserialize(date, subType)); - } - return transformedData; - } else if (type === "Date") { - return new Date(data); - } else { - if (enumsMap[type]) {// is Enum - return data; - } - - if (!typeMap[type]) { // dont know the type - return data; - } - let instance = new typeMap[type](); - let attributeTypes = typeMap[type].getAttributeTypeMap(); - for (let index in attributeTypes) { - let attributeType = attributeTypes[index]; - instance[attributeType.name] = ObjectSerializer.deserialize(data[attributeType.baseName], attributeType.type); - } - return instance; - } - } -} - -export class BdcStatusModel { - 'bdcName'?: string; - 'state'?: string; - 'healthStatus'?: string; - 'details'?: string; - 'services'?: Array; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "bdcName", - "baseName": "bdcName", - "type": "string" - }, - { - "name": "state", - "baseName": "state", - "type": "string" - }, - { - "name": "healthStatus", - "baseName": "healthStatus", - "type": "string" - }, - { - "name": "details", - "baseName": "details", - "type": "string" - }, - { - "name": "services", - "baseName": "services", - "type": "Array" - } ]; - - static getAttributeTypeMap() { - return BdcStatusModel.attributeTypeMap; - } -} - -export class Dashboards { - 'nodeMetricsUrl'?: string; - 'sqlMetricsUrl'?: string; - 'logsUrl'?: string; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "nodeMetricsUrl", - "baseName": "nodeMetricsUrl", - "type": "string" - }, - { - "name": "sqlMetricsUrl", - "baseName": "sqlMetricsUrl", - "type": "string" - }, - { - "name": "logsUrl", - "baseName": "logsUrl", - "type": "string" - } ]; - - static getAttributeTypeMap() { - return Dashboards.attributeTypeMap; - } -} - -export class EndpointModel implements bdc.IEndpointModel { - 'name'?: string; - 'description'?: string; - 'endpoint'?: string; - 'protocol'?: string; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "name", - "baseName": "name", - "type": "string" - }, - { - "name": "description", - "baseName": "description", - "type": "string" - }, - { - "name": "endpoint", - "baseName": "endpoint", - "type": "string" - }, - { - "name": "protocol", - "baseName": "protocol", - "type": "string" - } ]; - - static getAttributeTypeMap() { - return EndpointModel.attributeTypeMap; - } -} - -export class InstanceStatusModel { - 'instanceName'?: string; - 'state'?: string; - 'healthStatus'?: string; - 'details'?: string; - 'dashboards'?: Dashboards; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "instanceName", - "baseName": "instanceName", - "type": "string" - }, - { - "name": "state", - "baseName": 
"state", - "type": "string" - }, - { - "name": "healthStatus", - "baseName": "healthStatus", - "type": "string" - }, - { - "name": "details", - "baseName": "details", - "type": "string" - }, - { - "name": "dashboards", - "baseName": "dashboards", - "type": "Dashboards" - } ]; - - static getAttributeTypeMap() { - return InstanceStatusModel.attributeTypeMap; - } -} - -export class ResourceStatusModel { - 'resourceName'?: string; - 'state'?: string; - 'healthStatus'?: string; - 'details'?: string; - 'instances'?: Array; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "resourceName", - "baseName": "resourceName", - "type": "string" - }, - { - "name": "state", - "baseName": "state", - "type": "string" - }, - { - "name": "healthStatus", - "baseName": "healthStatus", - "type": "string" - }, - { - "name": "details", - "baseName": "details", - "type": "string" - }, - { - "name": "instances", - "baseName": "instances", - "type": "Array" - } ]; - - static getAttributeTypeMap() { - return ResourceStatusModel.attributeTypeMap; - } -} - -export class ServiceStatusModel { - 'serviceName'?: string; - 'state'?: string; - 'healthStatus'?: string; - 'details'?: string; - 'resources'?: Array; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{name: string, baseName: string, type: string}> = [ - { - "name": "serviceName", - "baseName": "serviceName", - "type": "string" - }, - { - "name": "state", - "baseName": "state", - "type": "string" - }, - { - "name": "healthStatus", - "baseName": "healthStatus", - "type": "string" - }, - { - "name": "details", - "baseName": "details", - "type": "string" - }, - { - "name": "resources", - "baseName": "resources", - "type": "Array" - } ]; - - static getAttributeTypeMap() { - return ServiceStatusModel.attributeTypeMap; - } -} - - -let enumsMap: {[index: string]: any} = { -} - -let typeMap: {[index: string]: any} = { - "BdcStatusModel": BdcStatusModel, - "Dashboards": Dashboards, - "EndpointModel": EndpointModel, - "InstanceStatusModel": InstanceStatusModel, - "ResourceStatusModel": ResourceStatusModel, - "ServiceStatusModel": ServiceStatusModel, -} - -export interface Authentication { - /** - * Apply authentication settings to header and query params. 
- */ - applyToRequest(requestOptions: localVarRequest.Options): void; -} - -export class HttpBasicAuth implements Authentication { - public username: string = ''; - public password: string = ''; - - applyToRequest(requestOptions: localVarRequest.Options): void { - requestOptions.auth = { - username: this.username, password: this.password - } - } -} - -export class ApiKeyAuth implements Authentication { - public apiKey: string = ''; - - constructor(private location: string, private paramName: string) { - } - - applyToRequest(requestOptions: localVarRequest.Options): void { - if (this.location == "query") { - (requestOptions.qs)[this.paramName] = this.apiKey; - } else if (this.location == "header" && requestOptions && requestOptions.headers) { - requestOptions.headers[this.paramName] = this.apiKey; - } - } -} - -export class OAuth implements Authentication { - public accessToken: string = ''; - - applyToRequest(requestOptions: localVarRequest.Options): void { - if (requestOptions && requestOptions.headers) { - requestOptions.headers["Authorization"] = "Bearer " + this.accessToken; - } - } -} - -export class VoidAuth implements Authentication { - public username: string = ''; - public password: string = ''; - - applyToRequest(_: localVarRequest.Options): void { - // Do nothing - } -} - -export enum BdcRouterApiApiKeys { -} - -export class BdcRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders : any = {}; - protected _useQuerystring : boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'basicAuth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: BdcRouterApiApiKeys, value: string) { - (this.authentications as any)[BdcRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.basicAuth.username = username; - } - - set password(password: string) { - this.authentications.basicAuth.password = password; - } - /** - * - * @summary Create a cluster - * @param xRequestId - * @param connection - * @param data Cluster configuration in JSON format - * @param {*} [options] Override http request options. 
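- * @returns a promise resolving to { response, body } on a 2xx status code and rejecting with the same shape otherwise.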
- */ - public createCluster (xRequestId: string, connection: string, data: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling createCluster.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling createCluster.'); - } - - // verify required parameter 'data' is not null or undefined - if (data === null || data === undefined) { - throw new Error('Required parameter data was null or undefined when calling createCluster.'); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - body: ObjectSerializer.serialize(data, "string") - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @param endpointName - * @param {*} [options] Override http request options. 
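- * @returns the named endpoint deserialized as an EndpointModel; endpointName is URL-encoded into the /api/v1/bdc/endpoints/{endpointName} path.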
- */ - public endpointsByNameGet (endpointName: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: EndpointModel; }> { - const localVarPath = this.basePath + '/api/v1/bdc/endpoints/{endpointName}' - .replace('{' + 'endpointName' + '}', encodeURIComponent(String(endpointName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'endpointName' is not null or undefined - if (endpointName === null || endpointName === undefined) { - throw new Error('Required parameter endpointName was null or undefined when calling endpointsByNameGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: EndpointModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "EndpointModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @param {*} [options] Override http request options. 
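- * @returns every endpoint exposed by the cluster, deserialized as Array<EndpointModel>.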
- */ - public endpointsGet (options: any = {}) : Promise<{ response: http.IncomingMessage; body: Array; }> { - const localVarPath = this.basePath + '/api/v1/bdc/endpoints'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: Array; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "Array"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get BDC status of services that contain this resource - * @param resourceName The name of the resource you want the services for - * @param xRequestId - * @param connection - * @param all Whether you want all of the instances within the given resource - * @param {*} [options] Override http request options. - */ - public getBdcResourceStatus (resourceName: string, xRequestId?: string, connection?: string, all?: boolean, options: any = {}) : Promise<{ response: http.IncomingMessage; body: BdcStatusModel; }> { - const localVarPath = this.basePath + '/api/v1/bdc/resources/{resourceName}/status' - .replace('{' + 'resourceName' + '}', encodeURIComponent(String(resourceName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'resourceName' is not null or undefined - if (resourceName === null || resourceName === undefined) { - throw new Error('Required parameter resourceName was null or undefined when calling getBdcResourceStatus.'); - } - - if (all !== undefined) { - localVarQueryParameters['all'] = ObjectSerializer.serialize(all, "boolean"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: BdcStatusModel; 
}>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "BdcStatusModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get resource status within this service - * @param serviceName The name of the service in the BDC cluster that you want - * @param resourceName The name of the resource in the service that you want - * @param xRequestId - * @param connection - * @param all Whether you want all of the instances within the given resource - * @param {*} [options] Override http request options. - */ - public getBdcServiceResourceStatus (serviceName: string, resourceName: string, xRequestId?: string, connection?: string, all?: boolean, options: any = {}) : Promise<{ response: http.IncomingMessage; body: ResourceStatusModel; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/{serviceName}/resources/{resourceName}/status' - .replace('{' + 'serviceName' + '}', encodeURIComponent(String(serviceName))) - .replace('{' + 'resourceName' + '}', encodeURIComponent(String(resourceName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'serviceName' is not null or undefined - if (serviceName === null || serviceName === undefined) { - throw new Error('Required parameter serviceName was null or undefined when calling getBdcServiceResourceStatus.'); - } - - // verify required parameter 'resourceName' is not null or undefined - if (resourceName === null || resourceName === undefined) { - throw new Error('Required parameter resourceName was null or undefined when calling getBdcServiceResourceStatus.'); - } - - if (all !== undefined) { - localVarQueryParameters['all'] = ObjectSerializer.serialize(all, "boolean"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: ResourceStatusModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "ResourceStatusModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get BDC status - * @param xRequestId - * @param connection - * @param 
all Whether you want all of the instances within the given resource - * @param {*} [options] Override http request options. - */ - public getBdcStatus (xRequestId?: string, connection?: string, all?: boolean, options: any = {}) : Promise<{ response: http.IncomingMessage; body: BdcStatusModel; }> { - const localVarPath = this.basePath + '/api/v1/bdc/status'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - if (all !== undefined) { - localVarQueryParameters['all'] = ObjectSerializer.serialize(all, "boolean"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: BdcStatusModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "BdcStatusModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - - /** - * - * @summary Get cluster config - * @param xRequestId - * @param connection - * @param {*} [options] Override http request options. 
- */ - public getCluster (xRequestId?: string, connection?: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get console logs - * @param xRequestId - * @param connection - * @param offset - * @param {*} [options] Override http request options. 
- */ - public getLogs (xRequestId: string, connection: string, offset: number, options: any = {}) : Promise<{ response: http.IncomingMessage; body: string; }> { - const localVarPath = this.basePath + '/api/v1/bdc/log'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling getLogs.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling getLogs.'); - } - - // verify required parameter 'offset' is not null or undefined - if (offset === null || offset === undefined) { - throw new Error('Required parameter offset was null or undefined when calling getLogs.'); - } - - if (offset !== undefined) { - localVarQueryParameters['offset'] = ObjectSerializer.serialize(offset, "number"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: string; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "string"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} -export enum ControlRouterApiApiKeys { -} - -export class ControlRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders : any = {}; - protected _useQuerystring : boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'basicAuth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: 
ControlRouterApiApiKeys, value: string) { - (this.authentications as any)[ControlRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.basicAuth.username = username; - } - - set password(password: string) { - this.authentications.basicAuth.password = password; - } - /** - * - * @summary Get control status - * @param xRequestId - * @param connection - * @param {*} [options] Override http request options. - */ - public getControlStatus (xRequestId: string, connection: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/control/status'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling getControlStatus.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling getControlStatus.'); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} -export enum DefaultApiApiKeys { -} - -export class DefaultApi { - protected _basePath = defaultBasePath; - protected defaultHeaders : any = {}; - protected _useQuerystring : boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'basicAuth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public 
setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: DefaultApiApiKeys, value: string) { - (this.authentications as any)[DefaultApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.basicAuth.username = username; - } - - set password(password: string) { - this.authentications.basicAuth.password = password; - } - /** - * - * @summary Create a mount - * @param xRequestId - * @param connection - * @param remote URI of remote store to mount - * @param mount Local path to mount on - * @param credentials Credentials to create the mount - * @param {*} [options] Override http request options. - */ - public createMount (xRequestId: string, connection: string, remote: string, mount: string, credentials?: any, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/hdfs/mounts'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling createMount.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling createMount.'); - } - - // verify required parameter 'remote' is not null or undefined - if (remote === null || remote === undefined) { - throw new Error('Required parameter remote was null or undefined when calling createMount.'); - } - - // verify required parameter 'mount' is not null or undefined - if (mount === null || mount === undefined) { - throw new Error('Required parameter mount was null or undefined when calling createMount.'); - } - - if (remote !== undefined) { - localVarQueryParameters['remote'] = ObjectSerializer.serialize(remote, "string"); - } - - if (mount !== undefined) { - localVarQueryParameters['mount'] = ObjectSerializer.serialize(mount, "string"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - body: ObjectSerializer.serialize(credentials, "any") - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); 
- } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Delete a cluster - * @param xRequestId - * @param connection - * @param {*} [options] Override http request options. - */ - public deleteCluster (xRequestId: string, connection: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling deleteCluster.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling deleteCluster.'); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'DELETE', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Delete a mount - * @param xRequestId - * @param connection - * @param mount Local HDFS mount path - * @param {*} [options] Override http request options. 
- */ - public deleteMount (xRequestId: string, connection: string, mount: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/hdfs/mounts'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling deleteMount.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling deleteMount.'); - } - - // verify required parameter 'mount' is not null or undefined - if (mount === null || mount === undefined) { - throw new Error('Required parameter mount was null or undefined when calling deleteMount.'); - } - - if (mount !== undefined) { - localVarQueryParameters['mount'] = ObjectSerializer.serialize(mount, "string"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'DELETE', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get health properties with specific query - * @param xRequestId - * @param connection - * @param query The query in the json format for the health properties - * @param {*} [options] Override http request options. 
- */ - public getHealthProperties (xRequestId: string, connection: string, query: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/health'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling getHealthProperties.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling getHealthProperties.'); - } - - // verify required parameter 'query' is not null or undefined - if (query === null || query === undefined) { - throw new Error('Required parameter query was null or undefined when calling getHealthProperties.'); - } - - if (query !== undefined) { - localVarQueryParameters['query'] = ObjectSerializer.serialize(query, "string"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Retrieve home page of Controller service - * @param xRequestId - * @param connection - * @param {*} [options] Override http request options. 
- */ - public getHome (xRequestId: string, connection: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling getHome.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling getHome.'); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get resource status - * @param bdcName The name of the BDC cluster from which you want the status - * @param serviceName The name of the service in the BDC cluster that you want - * @param xRequestId - * @param connection - * @param all Whether you want all of the instances within the given resource - * @param {*} [options] Override http request options. 
- */ - public getPoolStatus (bdcName: string, serviceName: string, xRequestId?: string, connection?: string, all?: boolean, options: any = {}) : Promise<{ response: http.IncomingMessage; body: ServiceStatusModel; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/{serviceName}/status' - .replace('{' + 'bdcName' + '}', encodeURIComponent(String(bdcName))) - .replace('{' + 'serviceName' + '}', encodeURIComponent(String(serviceName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'bdcName' is not null or undefined - if (bdcName === null || bdcName === undefined) { - throw new Error('Required parameter bdcName was null or undefined when calling getPoolStatus.'); - } - - // verify required parameter 'serviceName' is not null or undefined - if (serviceName === null || serviceName === undefined) { - throw new Error('Required parameter serviceName was null or undefined when calling getPoolStatus.'); - } - - if (all !== undefined) { - localVarQueryParameters['all'] = ObjectSerializer.serialize(all, "boolean"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: ServiceStatusModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "ServiceStatusModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Get list of all mounts - * @param xRequestId - * @param connection - * @param mount - * @param {*} [options] Override http request options. 
- */ - public listMounts (xRequestId: string, connection: string, mount?: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/hdfs/mounts'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling listMounts.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling listMounts.'); - } - - if (mount !== undefined) { - localVarQueryParameters['mount'] = ObjectSerializer.serialize(mount, "string"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Refresh a mount - * @param xRequestId - * @param connection - * @param mount Local path to mount on HDFS - * @param {*} [options] Override http request options. 
- */ - public refreshMount (xRequestId: string, connection: string, mount: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body: any; }> { - const localVarPath = this.basePath + '/api/v1/bdc/services/hdfs/mounts/refresh'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling refreshMount.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling refreshMount.'); - } - - // verify required parameter 'mount' is not null or undefined - if (mount === null || mount === undefined) { - throw new Error('Required parameter mount was null or undefined when calling refreshMount.'); - } - - if (mount !== undefined) { - localVarQueryParameters['mount'] = ObjectSerializer.serialize(mount, "string"); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "any"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Update the password for the given service and user - * @param xRequestId - * @param connection - * @param serviceName - * @param serviceUsername - * @param data Password and cluster name in JSON format - * @param {*} [options] Override http request options. 
- */ - public updatePassword (xRequestId: string, connection: string, serviceName: string, serviceUsername: string, data: string, options: any = {}) : Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/passwords/{serviceName}/{serviceUsername}' - .replace('{' + 'serviceName' + '}', encodeURIComponent(String(serviceName))) - .replace('{' + 'serviceUsername' + '}', encodeURIComponent(String(serviceUsername))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'xRequestId' is not null or undefined - if (xRequestId === null || xRequestId === undefined) { - throw new Error('Required parameter xRequestId was null or undefined when calling updatePassword.'); - } - - // verify required parameter 'connection' is not null or undefined - if (connection === null || connection === undefined) { - throw new Error('Required parameter connection was null or undefined when calling updatePassword.'); - } - - // verify required parameter 'serviceName' is not null or undefined - if (serviceName === null || serviceName === undefined) { - throw new Error('Required parameter serviceName was null or undefined when calling updatePassword.'); - } - - // verify required parameter 'serviceUsername' is not null or undefined - if (serviceUsername === null || serviceUsername === undefined) { - throw new Error('Required parameter serviceUsername was null or undefined when calling updatePassword.'); - } - - // verify required parameter 'data' is not null or undefined - if (data === null || data === undefined) { - throw new Error('Required parameter data was null or undefined when calling updatePassword.'); - } - - localVarHeaderParams['X-Request-Id'] = ObjectSerializer.serialize(xRequestId, "string"); - localVarHeaderParams['Connection'] = ObjectSerializer.serialize(connection, "string"); - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'PUT', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - body: ObjectSerializer.serialize(data, "string") - }; - - this.authentications.basicAuth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/controller/clusterApiGenerated2.ts b/extensions/big-data-cluster/src/bigDataCluster/controller/clusterApiGenerated2.ts deleted file mode 100644 index bd2bf7b84b..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/controller/clusterApiGenerated2.ts +++ /dev/null @@ -1,1510 +0,0 @@ 
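[Editor's aside — illustration only, not part of the patch.] Every client removed in this diff follows the same generated pattern: a constructor that wires basic-auth credentials into an HttpBasicAuth authenticator, per-endpoint methods that thread the X-Request-Id and Connection headers explicitly, and a Promise wrapper that resolves with { response, body } on a 2xx status and rejects with the same shape otherwise. A minimal usage sketch against the DefaultApi and ControlRouterApi classes deleted above (the import path, controller URL, and credential payload are assumptions, not taken from this diff):

import { ControlRouterApi, DefaultApi } from './apiGenerated'; // pre-removal path (assumed)

async function demo(): Promise<void> {
    // The (username, password, basePath) constructor overload feeds HttpBasicAuth.
    const control = new ControlRouterApi('admin', 'secret', 'https://localhost:30080');
    const hdfs = new DefaultApi('admin', 'secret', 'https://localhost:30080');

    // GET /api/v1/control/status; both header parameters are required here.
    const status = await control.getControlStatus('req-1', 'keep-alive');
    console.log(status.response.statusCode, status.body);

    // POST /api/v1/bdc/services/hdfs/mounts: remote and mount travel as query
    // parameters, the credentials object as the JSON body.
    await hdfs.createMount('req-2', 'keep-alive',
        'abfs://data@example.dfs.core.windows.net', // remote store URI (hypothetical)
        '/mounts/data',                             // local HDFS mount path (hypothetical)
        { 'account.key': '...' });                  // credential shape is untyped (any)
}

demo().catch(console.error); // non-2xx responses reject with { response, body }, so always catch

Because rejection carries { response, body } rather than an Error, callers that need failure details must inspect the rejected value instead of relying on error.message.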
-/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -// Note: this file will hopefully be merged with apiGenerated.ts at a later date, once the server-side code -// is available from a single source of truth. Until then, keeping as separate files - -/* tslint:disable: no-unexternalized-strings */ -/* tslint:disable: semicolon */ -/* tslint:disable: triple-equals */ -/* tslint:disable: no-redundant-jsdoc */ - -import localVarRequest = require('request'); -import http = require('http'); - -let defaultBasePath = 'https://localhost'; - -// =============================================== -// This file is autogenerated - Please do not edit -// =============================================== - -/* tslint:disable:no-unused-variable */ -let primitives = [ - "string", - "boolean", - "double", - "integer", - "long", - "float", - "number", - "any" -]; - -class ObjectSerializer { - - public static findCorrectType(data: any, expectedType: string) { - if (data == undefined) { - return expectedType; - } else if (primitives.indexOf(expectedType.toLowerCase()) !== -1) { - return expectedType; - } else if (expectedType === "Date") { - return expectedType; - } else { - if (enumsMap[expectedType]) { - return expectedType; - } - - if (!typeMap[expectedType]) { - return expectedType; // w/e we don't know the type - } - - // Check the discriminator - let discriminatorProperty = typeMap[expectedType].discriminator; - if (discriminatorProperty == null) { - return expectedType; // the type does not have a discriminator. use it. - } else { - if (data[discriminatorProperty]) { - return data[discriminatorProperty]; // use the type given in the discriminator - } else { - return expectedType; // discriminator was not present (or an empty string) - } - } - } - } - - public static serialize(data: any, type: string) { - if (data == undefined) { - return data; - } else if (primitives.indexOf(type.toLowerCase()) !== -1) { - return data; - } else if (type.lastIndexOf("Array<", 0) === 0) { // string.startsWith pre es6 - let subType: string = type.replace("Array<", ""); // Array<Type> => Type> - subType = subType.substring(0, subType.length - 1); // Type> => Type - let transformedData: any[] = []; - for (let index in data) { - let date = data[index]; - transformedData.push(ObjectSerializer.serialize(date, subType)); - } - return transformedData; - } else if (type === "Date") { - return data.toString(); - } else { - if (enumsMap[type]) { - return data; - } - if (!typeMap[type]) { // in case we dont know the type - return data; - } - - // get the map for the correct type. - let attributeTypes = typeMap[type].getAttributeTypeMap(); - let instance: { [index: string]: any } = {}; - for (let index in attributeTypes) { - let attributeType = attributeTypes[index]; - instance[attributeType.baseName] = ObjectSerializer.serialize(data[attributeType.name], attributeType.type); - } - return instance; - } - } - - public static deserialize(data: any, type: string) { - // polymorphism may change the actual type.
- type = ObjectSerializer.findCorrectType(data, type); - if (data == undefined) { - return data; - } else if (primitives.indexOf(type.toLowerCase()) !== -1) { - return data; - } else if (type.lastIndexOf("Array<", 0) === 0) { // string.startsWith pre es6 - let subType: string = type.replace("Array<", ""); // Array<Type> => Type> - subType = subType.substring(0, subType.length - 1); // Type> => Type - let transformedData: any[] = []; - for (let index in data) { - let date = data[index]; - transformedData.push(ObjectSerializer.deserialize(date, subType)); - } - return transformedData; - } else if (type === "Date") { - return new Date(data); - } else { - if (enumsMap[type]) {// is Enum - return data; - } - - if (!typeMap[type]) { // dont know the type - return data; - } - let instance = new typeMap[type](); - let attributeTypes = typeMap[type].getAttributeTypeMap(); - for (let index in attributeTypes) { - let attributeType = attributeTypes[index]; - instance[attributeType.name] = ObjectSerializer.deserialize(data[attributeType.baseName], attributeType.type); - } - return instance; - } - } -} - -export class AppModel { - 'name'?: string; - 'internalName'?: string; - 'version'?: string; - 'inputParamDefs'?: Array<AppModelParameterDefinition>; - 'outputParamDefs'?: Array<AppModelParameterDefinition>; - 'state'?: string; - 'links'?: { [key: string]: string; }; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{ name: string, baseName: string, type: string }> = [ - { - "name": "name", - "baseName": "name", - "type": "string" - }, - { - "name": "internalName", - "baseName": "internal_name", - "type": "string" - }, - { - "name": "version", - "baseName": "version", - "type": "string" - }, - { - "name": "inputParamDefs", - "baseName": "input_param_defs", - "type": "Array<AppModelParameterDefinition>" - }, - { - "name": "outputParamDefs", - "baseName": "output_param_defs", - "type": "Array<AppModelParameterDefinition>" - }, - { - "name": "state", - "baseName": "state", - "type": "string" - }, - { - "name": "links", - "baseName": "links", - "type": "{ [key: string]: string; }" - }]; - - static getAttributeTypeMap() { - return AppModel.attributeTypeMap; - } -} - -export class AppModelParameterDefinition { - 'name'?: string; - 'type'?: string; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{ name: string, baseName: string, type: string }> = [ - { - "name": "name", - "baseName": "name", - "type": "string" - }, - { - "name": "type", - "baseName": "type", - "type": "string" - }]; - - static getAttributeTypeMap() { - return AppModelParameterDefinition.attributeTypeMap; - } -} - -export class TokenModel { - 'tokenType'?: string; - 'accessToken'?: string; - 'expiresIn'?: number; - 'expiresOn'?: number; - 'tokenId'?: string; - 'namespace'?: string; - - static discriminator: string | undefined = undefined; - - static attributeTypeMap: Array<{ name: string, baseName: string, type: string }> = [ - { - "name": "tokenType", - "baseName": "token_type", - "type": "string" - }, - { - "name": "accessToken", - "baseName": "access_token", - "type": "string" - }, - { - "name": "expiresIn", - "baseName": "expires_in", - "type": "number" - }, - { - "name": "expiresOn", - "baseName": "expires_on", - "type": "number" - }, - { - "name": "tokenId", - "baseName": "token_id", - "type": "string" - }, - { - "name": "namespace", - "baseName": "namespace", - "type": "string" - }]; - - static getAttributeTypeMap() { - return TokenModel.attributeTypeMap; - } -} - - -let enumsMap: { [index: string]: any } = { -} - -let typeMap: { [index: string]: any } = { - "AppModel":
AppModel, - "AppModelParameterDefinition": AppModelParameterDefinition, - "TokenModel": TokenModel, -} - -export interface Authentication { - /** - * Apply authentication settings to header and query params. - */ - applyToRequest(requestOptions: localVarRequest.Options): void; -} - -export class HttpBasicAuth implements Authentication { - public username: string = ''; - public password: string = ''; - - applyToRequest(requestOptions: localVarRequest.Options): void { - requestOptions.auth = { - username: this.username, password: this.password - } - } -} - -export class ApiKeyAuth implements Authentication { - public apiKey: string = ''; - - constructor(private location: string, private paramName: string) { - } - - applyToRequest(requestOptions: localVarRequest.Options): void { - if (this.location == "query") { - (requestOptions.qs)[this.paramName] = this.apiKey; - } else if (this.location == "header" && requestOptions && requestOptions.headers) { - requestOptions.headers[this.paramName] = this.apiKey; - } - } -} - -export class OAuth implements Authentication { - public accessToken: string = ''; - - applyToRequest(requestOptions: localVarRequest.Options): void { - if (requestOptions && requestOptions.headers) { - requestOptions.headers["Authorization"] = "Bearer " + this.accessToken; - } - } -} - -export class VoidAuth implements Authentication { - public username: string = ''; - public password: string = ''; - - applyToRequest(_: localVarRequest.Options): void { - // Do nothing - } -} - -export enum AppRouterApiApiKeys { -} - -export class AppRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders: any = {}; - protected _useQuerystring: boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'auth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: AppRouterApiApiKeys, value: string) { - (this.authentications as any)[AppRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.auth.username = username; - } - - set password(password: string) { - this.authentications.auth.password = password; - } - /** - * - * @summary ApiV1AppByNameAndVersion_DELETE - * @param name - * @param version - * @param {*} [options] Override http request options. 
- */ - public apiV1AppByNameAndVersionDelete(name: string, version: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/app/{name}/{version}' - .replace('{' + 'name' + '}', encodeURIComponent(String(name))) - .replace('{' + 'version' + '}', encodeURIComponent(String(version))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'name' is not null or undefined - if (name === null || name === undefined) { - throw new Error('Required parameter name was null or undefined when calling apiV1AppByNameAndVersionDelete.'); - } - - // verify required parameter 'version' is not null or undefined - if (version === null || version === undefined) { - throw new Error('Required parameter version was null or undefined when calling apiV1AppByNameAndVersionDelete.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'DELETE', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary GetAppByNameVersion - * @param name - * @param version - * @param {*} [options] Override http request options. 
- */ - public apiV1AppByNameAndVersionGet(name: string, version: string, options: any = {}): Promise<{ response: http.IncomingMessage; body: AppModel; }> { - const localVarPath = this.basePath + '/api/v1/app/{name}/{version}' - .replace('{' + 'name' + '}', encodeURIComponent(String(name))) - .replace('{' + 'version' + '}', encodeURIComponent(String(version))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'name' is not null or undefined - if (name === null || name === undefined) { - throw new Error('Required parameter name was null or undefined when calling apiV1AppByNameAndVersionGet.'); - } - - // verify required parameter 'version' is not null or undefined - if (version === null || version === undefined) { - throw new Error('Required parameter version was null or undefined when calling apiV1AppByNameAndVersionGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: AppModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "AppModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary ApiV1AppByName_GET - * @param name - * @param {*} [options] Override http request options. 
- */ - public apiV1AppByNameGet(name: string, options: any = {}): Promise<{ response: http.IncomingMessage; body: Array<AppModel>; }> { - const localVarPath = this.basePath + '/api/v1/app/{name}' - .replace('{' + 'name' + '}', encodeURIComponent(String(name))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'name' is not null or undefined - if (name === null || name === undefined) { - throw new Error('Required parameter name was null or undefined when calling apiV1AppByNameGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: Array<AppModel>; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "Array<AppModel>"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary ApiV1App_GET - * @param {*} [options] Override http request options. - */ - public apiV1AppGet(options: any = {}): Promise<{ response: http.IncomingMessage; body: Array<AppModel>; }> { - const localVarPath = this.basePath + '/api/v1/app'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: Array<AppModel>; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "Array<AppModel>"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary UpdateApp - * @param spec - * @param _package - * @param {*} [options] Override http request options.
- */ - public apiV1AppPatch(spec?: Buffer, _package?: Buffer, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/app'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - if (spec !== undefined) { - localVarFormParams['Spec'] = spec; - } - localVarUseFormData = true; - - if (_package !== undefined) { - localVarFormParams['Package'] = _package; - } - localVarUseFormData = true; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'PATCH', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary CreateApp - * @param spec - * @param _package - * @param {*} [options] Override http request options. 
- */ - public apiV1AppPost(spec?: Buffer, _package?: Buffer, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/app'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - if (spec !== undefined) { - localVarFormParams['Spec'] = spec; - } - localVarUseFormData = true; - - if (_package !== undefined) { - localVarFormParams['Package'] = _package; - } - localVarUseFormData = true; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary GetAppSwagger - * @param name - * @param version - * @param {*} [options] Override http request options. 
- */ - public apiV1AppSwaggerJsonByNameAndVersionGet(name: string, version: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/app/{name}/{version}/swagger.json' - .replace('{' + 'name' + '}', encodeURIComponent(String(name))) - .replace('{' + 'version' + '}', encodeURIComponent(String(version))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'name' is not null or undefined - if (name === null || name === undefined) { - throw new Error('Required parameter name was null or undefined when calling apiV1AppSwaggerJsonByNameAndVersionGet.'); - } - - // verify required parameter 'version' is not null or undefined - if (version === null || version === undefined) { - throw new Error('Required parameter version was null or undefined when calling apiV1AppSwaggerJsonByNameAndVersionGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} -export enum FileRouterApiApiKeys { -} - -export class FileRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders: any = {}; - protected _useQuerystring: boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'auth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: FileRouterApiApiKeys, value: string) { - (this.authentications as any)[FileRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.auth.username = username; - } - - set password(password: string) { - this.authentications.auth.password = password; - } - /** - * - * @summary ApiV1FilesByFilePath_GET - * @param filePath - * @param {*} [options] Override http request 
options. - */ - public apiV1FilesByFilePathGet(filePath: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/files/{filePath}' - .replace('{' + 'filePath' + '}', encodeURIComponent(String(filePath))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'filePath' is not null or undefined - if (filePath === null || filePath === undefined) { - throw new Error('Required parameter filePath was null or undefined when calling apiV1FilesByFilePathGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary ApiV1FilesFilelistByPodName_GET - * @param podName - * @param containerName - * @param {*} [options] Override http request options. 
- */ - public apiV1FilesFilelistByPodNameAndContainerNameGet(podName: string, containerName: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/files/filelist/{podName}/{containerName}' - .replace('{' + 'podName' + '}', encodeURIComponent(String(podName))) - .replace('{' + 'containerName' + '}', encodeURIComponent(String(containerName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'podName' is not null or undefined - if (podName === null || podName === undefined) { - throw new Error('Required parameter podName was null or undefined when calling apiV1FilesFilelistByPodNameAndContainerNameGet.'); - } - - // verify required parameter 'containerName' is not null or undefined - if (containerName === null || containerName === undefined) { - throw new Error('Required parameter containerName was null or undefined when calling apiV1FilesFilelistByPodNameAndContainerNameGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary FilesByFilePath_GET - * @param filePath - * @param {*} [options] Override http request options. 
- */ - public filesByFilePathGet(filePath: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/files/{filePath}' - .replace('{' + 'filePath' + '}', encodeURIComponent(String(filePath))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'filePath' is not null or undefined - if (filePath === null || filePath === undefined) { - throw new Error('Required parameter filePath was null or undefined when calling filesByFilePathGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary FilesFilelistByPodNameAndContainerName_GET - * @param podName - * @param containerName - * @param {*} [options] Override http request options. 
- */ - public filesFilelistByPodNameAndContainerNameGet(podName: string, containerName: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/files/filelist/{podName}/{containerName}' - .replace('{' + 'podName' + '}', encodeURIComponent(String(podName))) - .replace('{' + 'containerName' + '}', encodeURIComponent(String(containerName))); - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - // verify required parameter 'podName' is not null or undefined - if (podName === null || podName === undefined) { - throw new Error('Required parameter podName was null or undefined when calling filesFilelistByPodNameAndContainerNameGet.'); - } - - // verify required parameter 'containerName' is not null or undefined - if (containerName === null || containerName === undefined) { - throw new Error('Required parameter containerName was null or undefined when calling filesFilelistByPodNameAndContainerNameGet.'); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} -export enum HealthRouterApiApiKeys { -} - -export class HealthRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders: any = {}; - protected _useQuerystring: boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'auth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: HealthRouterApiApiKeys, value: string) { - (this.authentications as any)[HealthRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.auth.username = username; - } - - set password(password: string) { - this.authentications.auth.password = password; - } - /** - * - * @summary 
ApiV1Health_GET - * @param query - * @param {*} [options] Override http request options. - */ - public apiV1HealthGet(query?: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/health'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - if (query !== undefined) { - localVarQueryParameters['query'] = ObjectSerializer.serialize(query, "string"); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary ApiV1Health_POST - * @param {*} [options] Override http request options. - */ - public apiV1HealthPost(options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/api/v1/health'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Health_GET - * @param query - * @param {*} [options] Override http request options. 
- */ - public healthGet(query?: string, options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/health'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - if (query !== undefined) { - localVarQueryParameters['query'] = ObjectSerializer.serialize(query, "string"); - } - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'GET', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Health_POST - * @param {*} [options] Override http request options. - */ - public healthPost(options: any = {}): Promise<{ response: http.IncomingMessage; body?: any; }> { - const localVarPath = this.basePath + '/health'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body?: any; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } -} -export enum TokenRouterApiApiKeys { -} - -export class TokenRouterApi { - protected _basePath = defaultBasePath; - protected defaultHeaders: any = {}; - protected _useQuerystring: boolean = false; - - protected authentications = { - 'default': new VoidAuth(), - 'auth': new HttpBasicAuth(), - } - - constructor(basePath?: string); - constructor(username: string, password: string, basePath?: string); - constructor(basePathOrUsername: string, password?: string, basePath?: string) { - if (password) { - this.username = 
basePathOrUsername; - this.password = password - if (basePath) { - this.basePath = basePath; - } - } else { - if (basePathOrUsername) { - this.basePath = basePathOrUsername - } - } - } - - set useQuerystring(value: boolean) { - this._useQuerystring = value; - } - - set basePath(basePath: string) { - this._basePath = basePath; - } - - get basePath() { - return this._basePath; - } - - public setDefaultAuthentication(auth: Authentication) { - this.authentications.default = auth; - } - - public setApiKey(key: TokenRouterApiApiKeys, value: string) { - (this.authentications as any)[TokenRouterApiApiKeys[key]].apiKey = value; - } - set username(username: string) { - this.authentications.auth.username = username; - } - - set password(password: string) { - this.authentications.auth.password = password; - } - /** - * - * @summary ApiV1Token_POST - * @param {*} [options] Override http request options. - */ - public apiV1TokenPost(options: any = {}): Promise<{ response: http.IncomingMessage; body: TokenModel; }> { - const localVarPath = this.basePath + '/api/v1/token'; - let localVarQueryParameters: any = {}; - let localVarHeaderParams: any = (Object).assign({}, this.defaultHeaders); - let localVarFormParams: any = {}; - - (Object).assign(localVarHeaderParams, options.headers); - - let localVarUseFormData = false; - - let localVarRequestOptions: localVarRequest.Options = { - method: 'POST', - qs: localVarQueryParameters, - headers: localVarHeaderParams, - uri: localVarPath, - useQuerystring: this._useQuerystring, - json: true, - }; - - this.authentications.auth.applyToRequest(localVarRequestOptions); - - this.authentications.default.applyToRequest(localVarRequestOptions); - - if (Object.keys(localVarFormParams).length) { - if (localVarUseFormData) { - (localVarRequestOptions).formData = localVarFormParams; - } else { - localVarRequestOptions.form = localVarFormParams; - } - } - return new Promise<{ response: http.IncomingMessage; body: TokenModel; }>((resolve, reject) => { - localVarRequest(localVarRequestOptions, (error, response, body) => { - if (error) { - reject(error); - } else { - body = ObjectSerializer.deserialize(body, "TokenModel"); - if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) { - resolve({ response: response, body: body }); - } else { - reject({ response: response, body: body }); - } - } - }); - }); - } - /** - * - * @summary Token_POST - * @param {*} [options] Override http request options. 
-	 */
-	public tokenPost(options: any = {}): Promise<{ response: http.IncomingMessage; body: TokenModel; }> {
-		const localVarPath = this.basePath + '/token';
-		let localVarQueryParameters: any = {};
-		let localVarHeaderParams: any = (<any>Object).assign({}, this.defaultHeaders);
-		let localVarFormParams: any = {};
-
-		(<any>Object).assign(localVarHeaderParams, options.headers);
-
-		let localVarUseFormData = false;
-
-		let localVarRequestOptions: localVarRequest.Options = {
-			method: 'POST',
-			qs: localVarQueryParameters,
-			headers: localVarHeaderParams,
-			uri: localVarPath,
-			useQuerystring: this._useQuerystring,
-			json: true,
-		};
-
-		this.authentications.auth.applyToRequest(localVarRequestOptions);
-
-		this.authentications.default.applyToRequest(localVarRequestOptions);
-
-		if (Object.keys(localVarFormParams).length) {
-			if (localVarUseFormData) {
-				(<any>localVarRequestOptions).formData = localVarFormParams;
-			} else {
-				localVarRequestOptions.form = localVarFormParams;
-			}
-		}
-		return new Promise<{ response: http.IncomingMessage; body: TokenModel; }>((resolve, reject) => {
-			localVarRequest(localVarRequestOptions, (error, response, body) => {
-				if (error) {
-					reject(error);
-				} else {
-					body = ObjectSerializer.deserialize(body, "TokenModel");
-					if (response.statusCode && response.statusCode >= 200 && response.statusCode <= 299) {
-						resolve({ response: response, body: body });
-					} else {
-						reject({ response: response, body: body });
-					}
-				}
-			});
-		});
-	}
-}
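Every method in these generated clients has the same shape: the `request` callback is adapted into a promise that resolves `{ response, body }` for 2xx status codes and rejects with the same `{ response, body }` pair otherwise. A minimal consumption sketch, assuming the generated module's file name and using placeholder connection details:

```ts
// Sketch only: module path, URL, and credentials are placeholders.
import { TokenRouterApi } from './clusterApiGenerated2';

async function fetchControllerToken(): Promise<string> {
	// The 3-argument constructor overload wires up HttpBasicAuth with these credentials.
	const api = new TokenRouterApi('admin', 'secret', 'https://controller.example:30080');
	// Resolves only on 2xx; a 401 surfaces as a rejected { response, body } pair.
	const result = await api.apiV1TokenPost();
	return result.body.accessToken;
}
```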
diff --git a/extensions/big-data-cluster/src/bigDataCluster/controller/clusterControllerApi.ts b/extensions/big-data-cluster/src/bigDataCluster/controller/clusterControllerApi.ts
deleted file mode 100644
index 104dbda87b..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/controller/clusterControllerApi.ts
+++ /dev/null
@@ -1,455 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as request from 'request';
-import { authenticateKerberos, getHostAndPortFromEndpoint } from '../auth';
-import { BdcRouterApi, Authentication, EndpointModel, BdcStatusModel, DefaultApi } from './apiGenerated';
-import { TokenRouterApi } from './clusterApiGenerated2';
-import * as nls from 'vscode-nls';
-import { ConnectControllerDialog, ConnectControllerModel } from '../dialog/connectControllerDialog';
-import { getIgnoreSslVerificationConfigSetting } from '../utils';
-import { IClusterController, AuthType, IEndPointsResponse, IHttpResponse } from 'bdc';
-
-const localize = nls.loadMessageBundle();
-
-const DEFAULT_KNOX_USERNAME = 'root';
-
-class SslAuth implements Authentication {
-	constructor() { }
-
-	applyToRequest(requestOptions: request.Options): void {
-		requestOptions.rejectUnauthorized = !getIgnoreSslVerificationConfigSetting();
-	}
-}
-
-export class KerberosAuth extends SslAuth implements Authentication {
-
-	constructor(public kerberosToken: string) {
-		super();
-	}
-
-	override applyToRequest(requestOptions: request.Options): void {
-		super.applyToRequest(requestOptions);
-		if (requestOptions && requestOptions.headers) {
-			requestOptions.headers['Authorization'] = `Negotiate ${this.kerberosToken}`;
-		}
-		requestOptions.auth = undefined;
-	}
-}
-export class BasicAuth extends SslAuth implements Authentication {
-	constructor(public username: string, public password: string) {
-		super();
-	}
-
-	override applyToRequest(requestOptions: request.Options): void {
-		super.applyToRequest(requestOptions);
-		requestOptions.auth = {
-			username: this.username, password: this.password
-		};
-	}
-}
-
-export class OAuthWithSsl extends SslAuth implements Authentication {
-	public accessToken: string = '';
-
-	override applyToRequest(requestOptions: request.Options): void {
-		super.applyToRequest(requestOptions);
-		if (requestOptions && requestOptions.headers) {
-			requestOptions.headers['Authorization'] = `Bearer ${this.accessToken}`;
-		}
-		requestOptions.auth = undefined;
-	}
-}
-
-class BdcApiWrapper extends BdcRouterApi {
-	constructor(basePathOrUsername: string, password: string, basePath: string, auth: Authentication) {
-		if (password) {
-			super(basePathOrUsername, password, basePath);
-		} else {
-			super(basePath, undefined, undefined);
-		}
-		this.authentications.default = auth;
-	}
-}
-class DefaultApiWrapper extends DefaultApi {
-	constructor(basePathOrUsername: string, password: string, basePath: string, auth: Authentication) {
-		if (password) {
-			super(basePathOrUsername, password, basePath);
-		} else {
-			super(basePath, undefined, undefined);
-		}
-		this.authentications.default = auth;
-	}
-}
-
-export class ClusterController implements IClusterController {
-
-	private _authPromise: Promise<Authentication>;
-	private _url: string;
-	private readonly _dialog: ConnectControllerDialog;
-	private _connectionPromise: Promise<ClusterController | undefined>;
-
-	constructor(url: string,
-		private _authType: AuthType,
-		private _username?: string,
-		private _password?: string
-	) {
-		if (!url || (_authType === 'basic' && (!_username || !_password))) {
-			throw new Error('Missing required inputs for Cluster controller API (URL, username, password)');
-		}
-		this._url = adjustUrl(url);
-		if (this._authType === 'basic') {
-			this._authPromise = Promise.resolve(new BasicAuth(_username, _password));
-		} else {
-			this._authPromise = this.requestTokenUsingKerberos();
-		}
-		this._dialog = new ConnectControllerDialog(new ConnectControllerModel(
-			{
-				url: this._url,
-				auth: this._authType,
-				username: this._username,
-				password: this._password
-			}));
-	}
-
-	public get url(): string {
-		return this._url;
-	}
-
-	public get authType(): AuthType {
-		return this._authType;
-	}
-
-	public get username(): string | undefined {
-		return this._username;
-	}
-
-	public get password(): string | undefined {
-		return this._password;
-	}
-
-	private async requestTokenUsingKerberos(): Promise<Authentication> {
-		let supportsKerberos = await this.verifyKerberosSupported();
-		if (!supportsKerberos) {
-			throw new Error(localize('error.no.activedirectory', "This cluster does not support Windows authentication"));
-		}
-
-		try {
-
-			// AD auth is available, log in to Kerberos and convert to token auth for all future calls
-			let host = getHostAndPortFromEndpoint(this._url).host;
-			let kerberosToken = await authenticateKerberos(host);
-			let tokenApi = new TokenRouterApi(this._url);
-			tokenApi.setDefaultAuthentication(new KerberosAuth(kerberosToken));
-			let result = await tokenApi.apiV1TokenPost();
-			let auth = new OAuthWithSsl();
-			auth.accessToken = result.body.accessToken;
-			return auth;
-		} catch (error) {
-			let controllerErr = new ControllerError(error, localize('bdc.error.tokenPost', "Error during authentication"));
-			if (controllerErr.code === 401) {
-				throw new Error(localize('bdc.error.unauthorized', "You do not have permission to log into this cluster using Windows Authentication"));
-			}
-			// Else throw the error as-is
-			throw controllerErr;
-		}
-	}
-
-	/**
-	 * Verify that this cluster supports Kerberos authentication. It does this by sending a request to the Token API route
-	 * without any credentials and verifying that it gets a 401 response back with a Negotiate www-authenticate header.
-	 */
-	private async verifyKerberosSupported(): Promise<boolean> {
-		let tokenApi = new TokenRouterApi(this._url);
-		tokenApi.setDefaultAuthentication(new SslAuth());
-		try {
-			await tokenApi.apiV1TokenPost();
-			console.warn(`Token API returned success without any auth while verifying Kerberos support for BDC Cluster ${this._url}`);
-			// If we get to here, the route for tokens doesn't require auth which is an unexpected error state
-			return false;
-		}
-		catch (error) {
-			if (!error.response) {
-				console.warn(`No response when verifying Kerberos support for BDC Cluster ${this._url} - ${error}`);
-				return false;
-			}
-
-			if (error.response.statusCode !== 401) {
-				console.warn(`Got unexpected status code ${error.response.statusCode} when verifying Kerberos support for BDC Cluster ${this._url}`);
-				return false;
-			}
-
-			const auths = error.response.headers['www-authenticate'] as string[] ?? [];
-			if (auths.includes('Negotiate')) {
-				return true;
-			}
-			console.warn(`Didn't get expected Negotiate auth type when verifying Kerberos support for BDC Cluster ${this.url}. Supported types : ${auths.join(', ')}`);
-			return false;
-		}
-	}
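The probe above is worth calling out: Kerberos support is detected purely from the failure mode of an unauthenticated call. A condensed sketch of the same check — hedged, since `SslAuth` is module-private in the original file and the error shape here assumes the generated client's rejection value:

```ts
// Sketch of the detection rule used by verifyKerberosSupported:
// 401 plus a 'Negotiate' entry in www-authenticate means AD auth is on offer.
async function supportsNegotiate(url: string): Promise<boolean> {
	const tokenApi = new TokenRouterApi(url);
	tokenApi.setDefaultAuthentication(new SslAuth());
	try {
		await tokenApi.apiV1TokenPost();
		return false; // success without credentials means the route is unguarded
	} catch (error) {
		const auths = (error.response?.headers['www-authenticate'] as string[]) ?? [];
		return error.response?.statusCode === 401 && auths.includes('Negotiate');
	}
}
```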
-
-	public async getKnoxUsername(defaultUsername: string): Promise<string> {
-		// This all is necessary because prior to CU5 BDC deployments all had the same default username for
-		// accessing the Knox gateway. But in CU5 the allowRunAsRoot setting was added and defaulted to false - so
-		// if that exists and is false then we use the username instead.
-		// Note that the SQL username may not necessarily be correct here either - but currently this is what
-		// we're requiring to run Notebooks in a BDC
-		const config = await this.getClusterConfig();
-		return config.spec?.spec?.security?.allowRunAsRoot === false ? defaultUsername : DEFAULT_KNOX_USERNAME;
-	}
-
-	public async getClusterConfig(promptConnect: boolean = false): Promise<{ response: IHttpResponse, spec: any }> {
-		return await this.withConnectRetry(
-			this.getClusterConfigImpl,
-			promptConnect,
-			localize('bdc.error.getClusterConfig', "Error retrieving cluster config from {0}", this._url));
-	}
-
-	private async getClusterConfigImpl(self: ClusterController): Promise<{ response: IHttpResponse, spec: any }> {
-		let auth = await self._authPromise;
-		let endPointApi = new BdcApiWrapper(self._username, self._password, self._url, auth);
-		let options: any = {};
-
-		let result = await endPointApi.getCluster(options);
-		return {
-			response: result.response as IHttpResponse,
-			spec: JSON.parse(result.body.spec)
-		};
-	}
-
-	public async getEndPoints(promptConnect: boolean = false): Promise<IEndPointsResponse> {
-		return await this.withConnectRetry(
-			this.getEndpointsImpl,
-			promptConnect,
-			localize('bdc.error.getEndPoints', "Error retrieving endpoints from {0}", this._url));
-	}
-
-	private async getEndpointsImpl(self: ClusterController): Promise<IEndPointsResponse> {
-		let auth = await self._authPromise;
-		let endPointApi = new BdcApiWrapper(self._username, self._password, self._url, auth);
-		let options: any = {};
-
-		let result = await endPointApi.endpointsGet(options);
-		return {
-			response: result.response as IHttpResponse,
-			endPoints: result.body as EndpointModel[]
-		};
-	}
-
-	public async getBdcStatus(promptConnect: boolean = false): Promise<IBdcStatusResponse> {
-		return await this.withConnectRetry(
-			this.getBdcStatusImpl,
-			promptConnect,
-			localize('bdc.error.getBdcStatus', "Error retrieving BDC status from {0}", this._url));
-	}
-
-	private async getBdcStatusImpl(self: ClusterController): Promise<IBdcStatusResponse> {
-		let auth = await self._authPromise;
-		const bdcApi = new BdcApiWrapper(self._username, self._password, self._url, auth);
-
-		const bdcStatus = await bdcApi.getBdcStatus('', '', /*all*/ true);
-		return {
-			response: bdcStatus.response,
-			bdcStatus: bdcStatus.body
-		};
-	}
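Taken together, these wrappers give callers a small surface: construct a `ClusterController`, then await the typed responses. A usage sketch with placeholder connection details:

```ts
// Sketch only: URL and credentials are placeholders.
async function printEndpoints(): Promise<void> {
	// 'basic' auth requires both username and password; the constructor throws otherwise.
	const controller = new ClusterController('10.0.0.4,30080', 'basic', 'admin', 'secret');
	// The URL is normalized by adjustUrl (defined further down) to https://10.0.0.4:30080.
	const { endPoints } = await controller.getEndPoints();
	for (const e of endPoints ?? []) {
		console.log(`${e.name} -> ${e.endpoint}`);
	}
}
```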
-
-	public async mountHdfs(mountPath: string, remoteUri: string, credentials: {}, promptConnection: boolean = false): Promise<MountResponse> {
-		return await this.withConnectRetry(
-			this.mountHdfsImpl,
-			promptConnection,
-			localize('bdc.error.mountHdfs', "Error creating mount"),
-			mountPath,
-			remoteUri,
-			credentials);
-	}
-
-	private async mountHdfsImpl(self: ClusterController, mountPath: string, remoteUri: string, credentials: {}): Promise<MountResponse> {
-		let auth = await self._authPromise;
-		const api = new DefaultApiWrapper(self._username, self._password, self._url, auth);
-
-		const mountStatus = await api.createMount('', '', remoteUri, mountPath, credentials);
-		return {
-			response: mountStatus.response,
-			status: mountStatus.body
-		};
-	}
-
-	public async getMountStatus(mountPath?: string, promptConnect: boolean = false): Promise<MountStatusResponse> {
-		return await this.withConnectRetry(
-			this.getMountStatusImpl,
-			promptConnect,
-			localize('bdc.error.statusHdfs', "Error getting mount status"),
-			mountPath);
-	}
-
-	private async getMountStatusImpl(self: ClusterController, mountPath?: string): Promise<MountStatusResponse> {
-		const auth = await self._authPromise;
-		const api = new DefaultApiWrapper(self._username, self._password, self._url, auth);
-
-		const mountStatus = await api.listMounts('', '', mountPath);
-		return {
-			response: mountStatus.response,
-			mount: mountStatus.body ? JSON.parse(mountStatus.body) : undefined
-		};
-	}
-
-	public async refreshMount(mountPath: string, promptConnect: boolean = false): Promise<MountResponse> {
-		return await this.withConnectRetry(
-			this.refreshMountImpl,
-			promptConnect,
-			localize('bdc.error.refreshHdfs', "Error refreshing mount"),
-			mountPath);
-	}
-
-	private async refreshMountImpl(self: ClusterController, mountPath: string): Promise<MountResponse> {
-		const auth = await self._authPromise;
-		const api = new DefaultApiWrapper(self._username, self._password, self._url, auth);
-
-		const mountStatus = await api.refreshMount('', '', mountPath);
-		return {
-			response: mountStatus.response,
-			status: mountStatus.body
-		};
-	}
-
-	public async deleteMount(mountPath: string, promptConnect: boolean = false): Promise<MountResponse> {
-		return await this.withConnectRetry(
-			this.deleteMountImpl,
-			promptConnect,
-			localize('bdc.error.deleteHdfs', "Error deleting mount"),
-			mountPath);
-	}
-
-	private async deleteMountImpl(self: ClusterController, mountPath: string): Promise<MountResponse> {
-		let auth = await self._authPromise;
-		const api = new DefaultApiWrapper(self._username, self._password, self._url, auth);
-
-		const mountStatus = await api.deleteMount('', '', mountPath);
-		return {
-			response: mountStatus.response,
-			status: mountStatus.body
-		};
-	}
-
-	/**
-	 * Helper function that wraps a function call in a try/catch and if promptConnect is true
-	 * will prompt the user to re-enter connection information and if that succeeds updates
-	 * this with the new information.
-	 * @param f The API function we're wrapping
-	 * @param promptConnect Whether to actually prompt for connection on failure
-	 * @param errorMessage The message to include in the wrapped error thrown
-	 * @param args The args to pass to the function
-	 */
-	private async withConnectRetry<T>(f: (...args: any[]) => Promise<T>, promptConnect: boolean, errorMessage: string, ...args: any[]): Promise<T> {
-		try {
-			try {
-				return await f(this, ...args);
-			} catch (error) {
-				if (promptConnect) {
-					// We don't want to open multiple dialogs here if multiple calls come in at the same time so check
-					// and see if we are actively waiting on an open dialog to return and if so then just wait
-					// on that promise.
-					if (!this._connectionPromise) {
-						this._connectionPromise = this._dialog.showDialog();
-					}
-					const controller = await this._connectionPromise;
-					if (controller) {
-						this._username = controller._username;
-						this._password = controller._password;
-						this._url = controller._url;
-						this._authType = controller._authType;
-						this._authPromise = controller._authPromise;
-					}
-					return await f(this, ...args);
-				}
-				throw error;
-			}
-		} catch (error) {
-			throw new ControllerError(error, errorMessage);
-		} finally {
-			this._connectionPromise = undefined;
-		}
-	}
-}
-
-/**
- * Fixes missing protocol and wrong character for port entered by user
- */
-function adjustUrl(url: string): string {
-	if (!url) {
-		return undefined;
-	}
-
-	url = url.trim().replace(/ /g, '').replace(/,(\d+)$/, ':$1');
-	if (!url.includes('://')) {
-		url = `https://${url}`;
-	}
-	return url;
-}
-
-export interface IClusterRequest {
-	url: string;
-	username: string;
-	password?: string;
-	method?: string;
-}
-
-export interface IBdcStatusResponse {
-	response: IHttpResponse;
-	bdcStatus: BdcStatusModel;
-}
-
-export enum MountState {
-	Creating = 'Creating',
-	Ready = 'Ready',
-	Error = 'Error'
-}
-
-export interface MountInfo {
-	mount: string;
-	remote: string;
-	state: MountState;
-	error?: string;
-}
-
-export interface MountResponse {
-	response: IHttpResponse;
-	status: any;
-}
-export interface MountStatusResponse {
-	response: IHttpResponse;
-	mount: MountInfo[];
-}
-
-export class ControllerError extends Error {
-	public code?: number;
-	public reason?: string;
-	public address?: string;
-	public statusMessage?: string;
-	/**
-	 *
-	 * @param error The original error to wrap
-	 * @param messagePrefix Optional text to prefix the error message with
-	 */
-	constructor(error: any, messagePrefix?: string) {
-		super(messagePrefix);
-		// Pull out the response information containing details about the failure
-		if (error.response) {
-			this.code = error.response.statusCode;
-			this.message += error.response.statusMessage ? ` - ${error.response.statusMessage}` : '';
-			this.address = error.response.url || '';
-			this.statusMessage = error.response.statusMessage;
-		}
-		else if (error.message) {
-			this.message += ` - ${error.message}`;
-		}
-
-		// The body message contains more specific information about the failure
-		if (error.body && error.body.reason) {
-			this.message += ` - ${error.body.reason}`;
-		}
-	}
-}
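`withConnectRetry` leans on a pattern worth naming: failures that arrive concurrently share a single reconnect dialog by caching the in-flight promise in `_connectionPromise`. A standalone sketch of that deduplication — the names here are illustrative, not from the file:

```ts
// Generic single-flight sketch: concurrent callers share one pending prompt.
let pendingPrompt: Promise<ClusterController | undefined> | undefined;

async function promptOnce(showDialog: () => Promise<ClusterController | undefined>): Promise<ClusterController | undefined> {
	if (!pendingPrompt) {
		pendingPrompt = showDialog(); // first failure opens the dialog
	}
	try {
		return await pendingPrompt; // later failures just await the same dialog
	} finally {
		pendingPrompt = undefined; // a future failure may prompt again
	}
}
```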
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/addControllerDialog.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/addControllerDialog.ts
deleted file mode 100644
index dd9b0d473e..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/addControllerDialog.ts
+++ /dev/null
@@ -1,229 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import { ClusterController, ControllerError } from '../controller/clusterControllerApi';
-import { ControllerTreeDataProvider } from '../tree/controllerTreeDataProvider';
-import { BdcDashboardOptions } from './bdcDashboardModel';
-import { ControllerNode } from '../tree/controllerTreeNode';
-import { ManageControllerCommand } from '../../commands';
-import * as loc from '../localizedConstants';
-import { AuthType } from 'bdc';
-
-function getAuthCategory(name: AuthType): azdata.CategoryValue {
-	if (name === 'basic') {
-		return { name: name, displayName: loc.basic };
-	}
-	return { name: name, displayName: loc.windowsAuth };
-}
-
-export class AddControllerDialogModel {
-
-	private _canceled = false;
-	private _authTypes: azdata.CategoryValue[];
-	constructor(
-		public treeDataProvider: ControllerTreeDataProvider,
-		public node?: ControllerNode,
-		public prefilledUrl?: string,
-		public prefilledAuth?: azdata.CategoryValue,
-		public prefilledUsername?: string,
-		public prefilledPassword?: string,
-		public prefilledRememberPassword?: boolean
-	) {
-		this.prefilledUrl = prefilledUrl || (node && node['url']);
-		this.prefilledAuth = prefilledAuth;
-		if (!prefilledAuth) {
-			let auth = (node && node['auth']) || 'basic';
-			this.prefilledAuth = getAuthCategory(auth);
-		}
-		this.prefilledUsername = prefilledUsername || (node && node['username']);
-		this.prefilledPassword = prefilledPassword || (node && node['password']);
-		this.prefilledRememberPassword = prefilledRememberPassword || (node && node['rememberPassword']);
-	}
-
-	public get authCategories(): azdata.CategoryValue[] {
-		if (!this._authTypes) {
-			this._authTypes = [getAuthCategory('basic'), getAuthCategory('integrated')];
-		}
-		return this._authTypes;
-	}
-
-	public async onComplete(url: string, auth: AuthType, username: string, password: string, rememberPassword: boolean): Promise<void> {
-		try {
-
-			if (auth === 'basic') {
-				// Verify username and password as we can't make them required in the UI
-				if (!username) {
-					throw new Error(loc.usernameRequired);
-				} else if (!password) {
-					throw new Error(loc.passwordRequired);
-				}
-			}
-			// We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect)
-			let controller = new ClusterController(url, auth, username, password);
-			let response = await controller.getEndPoints();
-			if (response && response.endPoints) {
-				if (this._canceled) {
-					return;
-				}
-				this.treeDataProvider.addOrUpdateController(url, auth, username, password, rememberPassword);
-				vscode.commands.executeCommand(ManageControllerCommand, { url: url, auth: auth, username: username, password: password });
-				await this.treeDataProvider.saveControllers();
-			}
-		} catch (error) {
-			// Ignore the error if we cancelled the request since we can't stop the actual request from completing
-			if (!this._canceled) {
-				throw error;
-			}
-		}
-
-	}
-
-	public async onError(error: ControllerError): Promise<void> {
-		// implement
-	}
-
-	public async onCancel(): Promise<void> {
-		this._canceled = true;
-		if (this.node) {
-			this.node.refresh();
-		}
-	}
-}
-
-export class AddControllerDialog {
-
-	private dialog: azdata.window.Dialog;
-	private uiModelBuilder: azdata.ModelBuilder;
-
-	private urlInputBox: azdata.InputBoxComponent;
-	private authDropdown: azdata.DropDownComponent;
-	private usernameInputBox: azdata.InputBoxComponent;
-	private passwordInputBox: azdata.InputBoxComponent;
-	private rememberPwCheckBox: azdata.CheckBoxComponent;
-
-	constructor(private model: AddControllerDialogModel) {
-	}
-
-	public showDialog(): void {
-		this.createDialog();
-		azdata.window.openDialog(this.dialog);
-	}
-
-	private createDialog(): void {
-		this.dialog = azdata.window.createModelViewDialog(loc.addNewController);
-		this.dialog.registerContent(async view => {
-			this.uiModelBuilder = view.modelBuilder;
-
-			this.urlInputBox = this.uiModelBuilder.inputBox()
-				.withProps({
-					placeHolder: loc.url.toLocaleLowerCase(),
-					value: this.model.prefilledUrl
-				}).component();
-			this.authDropdown = this.uiModelBuilder.dropDown().withProps({
-				values: this.model.authCategories,
-				value: this.model.prefilledAuth,
-				editable: false,
-			}).component();
-			this.authDropdown.onValueChanged(e => this.onAuthChanged());
-			this.usernameInputBox = this.uiModelBuilder.inputBox()
-				.withProps({
-					placeHolder: loc.usernameRequired.toLocaleLowerCase(),
-					value: this.model.prefilledUsername
-				}).component();
-			this.passwordInputBox = this.uiModelBuilder.inputBox()
-				.withProps({
-					placeHolder: loc.password,
-					inputType: 'password',
-					value: this.model.prefilledPassword
-				})
-				.component();
-			this.rememberPwCheckBox = this.uiModelBuilder.checkBox()
-				.withProps({
-					label: loc.rememberPassword,
-					checked: this.model.prefilledRememberPassword
-				}).component();
-
-			let formModel = this.uiModelBuilder.formContainer()
-				.withFormItems([{
-					components: [
-						{
-							component: this.urlInputBox,
-							title: loc.clusterUrl,
-							required: true
-						}, {
-							component: this.authDropdown,
-							title: loc.authType,
-							required: true
-						}, {
-							component: this.usernameInputBox,
-							title: loc.username,
-							required: false
-						}, {
-							component: this.passwordInputBox,
-							title: loc.password,
-							required: false
-						}, {
-							component: this.rememberPwCheckBox,
-							title: ''
-						}
-					],
-					title: ''
-				}]).withLayout({ width: '100%' }).component();
-			this.onAuthChanged();
-			await view.initializeModel(formModel);
-			this.urlInputBox.focus();
-		});
-
-		this.dialog.registerCloseValidator(async () => await this.validate());
-		this.dialog.cancelButton.onClick(async () => await this.cancel());
-		this.dialog.okButton.label = loc.add;
-		this.dialog.cancelButton.label = loc.cancel;
-	}
-
-	private get authValue(): AuthType {
-		return (this.authDropdown.value as azdata.CategoryValue).name as AuthType;
-	}
-
-	private onAuthChanged(): void {
-		let isBasic = this.authValue === 'basic';
-		this.usernameInputBox.enabled = isBasic;
-		this.passwordInputBox.enabled = isBasic;
-		this.rememberPwCheckBox.enabled = isBasic;
-		if (!isBasic) {
-			this.usernameInputBox.value = '';
-			this.passwordInputBox.value = '';
-		}
-	}
-
-	private async validate(): Promise<boolean> {
-		let url = this.urlInputBox && this.urlInputBox.value;
-		let auth = this.authValue;
-		let username = this.usernameInputBox && this.usernameInputBox.value;
-		let password = this.passwordInputBox && this.passwordInputBox.value;
-		let rememberPassword = this.rememberPwCheckBox && !!this.rememberPwCheckBox.checked;
-
-		try {
-			await this.model.onComplete(url, auth, username, password, rememberPassword);
-			return true;
-		} catch (error) {
-			this.dialog.message = {
-				text: (typeof error === 'string') ? error : error.message,
-				level: azdata.window.MessageLevel.Error
-			};
-			if (this.model && this.model.onError) {
-				await this.model.onError(error as ControllerError);
-			}
-			return false;
-		}
-	}
-
-	private async cancel(): Promise<void> {
-		if (this.model && this.model.onCancel) {
-			await this.model.onCancel();
-		}
-	}
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboard.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboard.ts
deleted file mode 100644
index 2efc98e1c7..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboard.ts
+++ /dev/null
@@ -1,106 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import { BdcDashboardModel, BdcErrorEvent } from './bdcDashboardModel';
-import { BdcServiceStatusPage } from './bdcServiceStatusPage';
-import { BdcDashboardOverviewPage } from './bdcDashboardOverviewPage';
-import { BdcStatusModel, ServiceStatusModel } from '../controller/apiGenerated';
-import { getServiceNameDisplayText, showErrorMessage, getHealthStatusDotIcon } from '../utils';
-import { HdfsDialogCancelledError } from './hdfsDialogBase';
-import { InitializingComponent } from './intializingComponent';
-import * as loc from '../localizedConstants';
-
-export class BdcDashboard extends InitializingComponent {
-
-	private dashboard: azdata.window.ModelViewDashboard;
-
-	private modelView: azdata.ModelView;
-
-	private createdServicePages: Map<string, azdata.DashboardTab> = new Map<string, azdata.DashboardTab>();
-	private overviewTab: azdata.DashboardTab;
-
-	constructor(private title: string, private model: BdcDashboardModel) {
-		super();
-		model.onDidUpdateBdcStatus(bdcStatus => this.eventuallyRunOnInitialized(() => this.handleBdcStatusUpdate(bdcStatus)));
-		model.onBdcError(errorEvent => this.eventuallyRunOnInitialized(() => this.handleError(errorEvent)));
-	}
-
-	public async showDashboard(): Promise<void> {
-		await this.createDashboard();
-		await this.dashboard.open();
-	}
-
-	private async createDashboard(): Promise<void> {
-		this.dashboard = azdata.window.createModelViewDashboard(this.title, 'BdcDashboard', { alwaysShowTabs: true });
-		this.dashboard.registerTabs(async (modelView: azdata.ModelView) => {
-			this.modelView = modelView;
-
-			const overviewPage = new BdcDashboardOverviewPage(this.model, modelView, this.dashboard);
-			this.overviewTab = {
-				title: loc.bdcOverview,
-				id: 'overview-tab',
-				content: overviewPage.container,
-				toolbar: overviewPage.toolbarContainer
-			};
-			return [
-				this.overviewTab
-			];
-		});
-		this.initialized = true;
-
-		// Now that we've created the UI load data from the model in case it already had data
-		this.handleBdcStatusUpdate(this.model.bdcStatus);
-	}
-
-	private handleBdcStatusUpdate(bdcStatus?: BdcStatusModel): void {
-		if (!bdcStatus) {
-			return;
-		}
-		this.updateServicePages(bdcStatus.services);
-	}
-
-	private handleError(errorEvent: BdcErrorEvent): void {
-		if (errorEvent.errorType !== 'general') {
-			return;
-		}
-		// We don't want to show an error for the connection dialog being
-		// canceled since that's a normal case.
-		if (!(errorEvent.error instanceof HdfsDialogCancelledError)) {
-			showErrorMessage(errorEvent.error.message);
-		}
-	}
-
-	/**
-	 * Update the service tab pages, creating any new ones as necessary
-	 */
-	private updateServicePages(services?: ServiceStatusModel[]): void {
-		if (services) {
-			// Create a service page for each new service. We currently don't support services being removed.
-			services.forEach(s => {
-				const existingPage = this.createdServicePages.get(s.serviceName);
-				if (existingPage) {
-					existingPage.icon = getHealthStatusDotIcon(s.healthStatus);
-				} else {
-					const serviceStatusPage = new BdcServiceStatusPage(s.serviceName, this.model, this.modelView);
-					const newTab = {
-						title: getServiceNameDisplayText(s.serviceName),
-						id: s.serviceName,
-						icon: getHealthStatusDotIcon(s.healthStatus),
-						content: serviceStatusPage.container,
-						toolbar: serviceStatusPage.toolbarContainer
-					};
-					this.createdServicePages.set(s.serviceName, newTab);
-				}
-			});
-			this.dashboard.updateTabs([
-				this.overviewTab,
-				{
-					title: loc.clusterDetails,
-					tabs: Array.from(this.createdServicePages.values())
-				}]);
-		}
-	}
-}
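For context, opening the dashboard ties this class to the model defined in the next file. A hedged sketch of the entry point, with placeholder option values:

```ts
// Sketch only: option values are placeholders.
declare const treeDataProvider: ControllerTreeDataProvider; // supplied by the extension
const model = new BdcDashboardModel(
	{ url: 'https://controller.example:30080', auth: 'basic', username: 'admin', password: 'secret', rememberPassword: true },
	treeDataProvider);
await new BdcDashboard('Big Data Cluster Dashboard', model).showDashboard();
```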
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardModel.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardModel.ts
deleted file mode 100644
index 5e2c1ae852..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardModel.ts
+++ /dev/null
@@ -1,182 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import { ClusterController } from '../controller/clusterControllerApi';
-import { EndpointModel, BdcStatusModel } from '../controller/apiGenerated';
-import { Endpoint, Service } from '../utils';
-import { ConnectControllerDialog, ConnectControllerModel } from './connectControllerDialog';
-import { ControllerTreeDataProvider } from '../tree/controllerTreeDataProvider';
-import { AuthType } from 'bdc';
-
-export type BdcDashboardOptions = { url: string, auth: AuthType, username: string, password: string, rememberPassword: boolean };
-
-type BdcErrorType = 'bdcStatus' | 'bdcEndpoints' | 'general';
-export type BdcErrorEvent = { error: Error, errorType: BdcErrorType };
-
-export class BdcDashboardModel {
-
-	private _clusterController: ClusterController;
-	private _bdcStatus: BdcStatusModel | undefined;
-	private _endpoints: EndpointModel[] | undefined;
-	private _bdcStatusLastUpdated: Date | undefined;
-	private _endpointsLastUpdated: Date | undefined;
-	private readonly _onDidUpdateEndpoints = new vscode.EventEmitter<EndpointModel[] | undefined>();
-	private readonly _onDidUpdateBdcStatus = new vscode.EventEmitter<BdcStatusModel | undefined>();
-	private readonly _onBdcError = new vscode.EventEmitter<BdcErrorEvent>();
-	public onDidUpdateEndpoints = this._onDidUpdateEndpoints.event;
-	public onDidUpdateBdcStatus = this._onDidUpdateBdcStatus.event;
-	public onBdcError = this._onBdcError.event;
-
-	constructor(private _options: BdcDashboardOptions, private _treeDataProvider: ControllerTreeDataProvider) {
-		try {
-			this._clusterController = new ClusterController(_options.url, _options.auth, _options.username, _options.password);
-			this.refresh().catch(e => console.log(`Unexpected error refreshing BdcModel ${e instanceof Error ? e.message : e}`));
-		} catch {
-			this.promptReconnect().then(async () => {
-				await this.refresh();
-			}).catch(error => {
-				this._onBdcError.fire({ error: error, errorType: 'general' });
-			});
-		}
-	}
-
-	public get bdcStatus(): BdcStatusModel | undefined {
-		return this._bdcStatus;
-	}
-
-	public get serviceEndpoints(): EndpointModel[] | undefined {
-		return this._endpoints;
-	}
-
-	public get bdcStatusLastUpdated(): Date | undefined {
-		return this._bdcStatusLastUpdated;
-	}
-
-	public get endpointsLastUpdated(): Date | undefined {
-		return this._endpointsLastUpdated;
-	}
-
-	public async refresh(): Promise<void> {
-		try {
-			if (!this._clusterController) {
-				// If this succeeds without error we know we have a clusterController at this point
-				await this.promptReconnect();
-			}
-
-			await Promise.all([
-				this._clusterController.getBdcStatus(true).then(response => {
-					this._bdcStatus = response.bdcStatus;
-					this._bdcStatusLastUpdated = new Date();
-					this._onDidUpdateBdcStatus.fire(this.bdcStatus);
-				}).catch(error => this._onBdcError.fire({ error: error, errorType: 'bdcStatus' })),
-				this._clusterController.getEndPoints(true).then(response => {
-					this._endpoints = response.endPoints;
-					fixEndpoints(this._endpoints);
-					this._endpointsLastUpdated = new Date();
-					this._onDidUpdateEndpoints.fire(this.serviceEndpoints);
-				}).catch(error => this._onBdcError.fire({ error: error, errorType: 'bdcEndpoints' }))
-			]);
-		} catch (error) {
-			this._onBdcError.fire({ error: error, errorType: 'general' });
-		}
-	}
-
-	/**
-	 * Gets a partially filled connection profile for the SQL Server Master Instance endpoint
-	 * associated with this cluster.
-	 * @returns The IConnectionProfile - or undefined if the endpoints haven't been loaded yet
-	 */
-	public getSqlServerMasterConnectionProfile(): azdata.IConnectionProfile | undefined {
-		const sqlServerMasterEndpoint = this.serviceEndpoints && this.serviceEndpoints.find(e => e.name === Endpoint.sqlServerMaster);
-		if (!sqlServerMasterEndpoint) {
-			return undefined;
-		}
-
-		// We default to sa - if that doesn't work then callers of this should open up a connection
-		// dialog so the user can enter in the correct connection information
-		return {
-			connectionName: undefined,
-			serverName: sqlServerMasterEndpoint.endpoint,
-			databaseName: undefined,
-			userName: 'sa',
-			password: this._options.password,
-			authenticationType: '',
-			savePassword: true,
-			groupFullName: undefined,
-			groupId: undefined,
-			providerName: 'MSSQL',
-			saveProfile: true,
-			id: undefined,
-			options: {}
-		};
-	}
-
-	/**
-	 * Opens up a dialog prompting the user to re-enter credentials for the controller
-	 */
-	private async promptReconnect(): Promise<void> {
-		this._clusterController = await new ConnectControllerDialog(new ConnectControllerModel(this._options)).showDialog();
-		await this.updateController();
-	}
-
-	private async updateController(): Promise<void> {
-		if (!this._clusterController) {
-			return;
-		}
-		this._treeDataProvider.addOrUpdateController(
-			this._clusterController.url,
-			this._clusterController.authType,
-			this._clusterController.username,
-			this._clusterController.password,
-			this._options.rememberPassword);
-		await this._treeDataProvider.saveControllers();
-	}
-}
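The model is deliberately event-driven: `refresh` never returns data; it fires `onDidUpdateBdcStatus` / `onDidUpdateEndpoints` (or `onBdcError`) instead, so consumers subscribe first and then kick off the fetch. A minimal sketch, assuming `model` is a `BdcDashboardModel` constructed as above:

```ts
model.onDidUpdateBdcStatus(status => console.log(`services reported: ${status?.services?.length ?? 0}`));
model.onDidUpdateEndpoints(endpoints => console.log(`endpoints: ${endpoints?.length ?? 0}`));
model.onBdcError(e => console.error(`${e.errorType} error: ${e.error.message}`));
await model.refresh(); // both fetches run in parallel; failures surface on onBdcError
```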
- * @param service The service name to get the troubleshoot notebook URL for
- */
-export function getTroubleshootNotebookUrl(service?: string): string {
-	service = service || '';
-	switch (service.toLowerCase()) {
-		case Service.sql:
-			return 'troubleshooters/tsg101-troubleshoot-sql-server';
-		case Service.hdfs:
-			return 'troubleshooters/tsg102-troubleshoot-hdfs';
-		case Service.spark:
-			return 'troubleshooters/tsg103-troubleshoot-spark';
-		case Service.control:
-			return 'troubleshooters/tsg104-troubleshoot-control';
-		case Service.gateway:
-			return 'troubleshooters/tsg105-troubleshoot-gateway';
-		case Service.app:
-			return 'troubleshooters/tsg106-troubleshoot-app';
-	}
-	return 'troubleshooters/tsg100-troubleshoot-bdc';
-}
-
-/**
- * Applies fixes to the endpoints received so they are displayed correctly
- * @param endpoints The endpoints received to modify
- */
-function fixEndpoints(endpoints?: EndpointModel[]): void {
-	if (!endpoints) {
-		return;
-	}
-	endpoints.forEach(e => {
-		if (e.name === Endpoint.metricsui && e.endpoint && e.endpoint.indexOf('/d/wZx3OUdmz') === -1) {
-			// Update to have correct URL
-			e.endpoint += '/d/wZx3OUdmz';
-		}
-		if (e.name === Endpoint.logsui && e.endpoint && e.endpoint.indexOf('/app/kibana#/discover') === -1) {
-			// Update to have correct URL
-			e.endpoint += '/app/kibana#/discover';
-		}
-	});
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardOverviewPage.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardOverviewPage.ts
deleted file mode 100644
index 94a5d20950..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardOverviewPage.ts
+++ /dev/null
@@ -1,468 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
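
For illustration, a minimal sketch of what fixEndpoints above does, reusing the names imported in that file; the host and port values are assumed sample data, not taken from the deleted sources:

	const endpoints: EndpointModel[] = [
		{ name: Endpoint.metricsui, endpoint: 'https://host:30777' },               // assumed sample value
		{ name: Endpoint.logsui, endpoint: 'https://host:30777/app/kibana#/discover' } // assumed sample value
	];
	fixEndpoints(endpoints);
	// endpoints[0].endpoint is now 'https://host:30777/d/wZx3OUdmz' (Grafana dashboard suffix appended)
	// endpoints[1].endpoint is unchanged, since it already contains '/app/kibana#/discover'
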
- *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { BdcDashboardModel, BdcErrorEvent } from './bdcDashboardModel'; -import { IconPathHelper, cssStyles } from '../constants'; -import { getStateDisplayText, getHealthStatusDisplayText, getEndpointDisplayText, getHealthStatusIcon, getServiceNameDisplayText, Endpoint, getBdcStatusErrorMessage } from '../utils'; -import { EndpointModel, BdcStatusModel } from '../controller/apiGenerated'; -import { createViewDetailsButton } from './commonControls'; -import { HdfsDialogCancelledError } from './hdfsDialogBase'; -import { BdcDashboardPage } from './bdcDashboardPage'; -import * as loc from '../localizedConstants'; - -const hyperlinkedEndpoints = [Endpoint.metricsui, Endpoint.logsui, Endpoint.sparkHistory, Endpoint.yarnUi]; - -export class BdcDashboardOverviewPage extends BdcDashboardPage { - private rootContainer: azdata.FlexContainer; - private lastUpdatedLabel: azdata.TextComponent; - private propertiesContainerLoadingComponent: azdata.LoadingComponent; - - private serviceStatusTable: azdata.DeclarativeTableComponent; - private endpointsTable: azdata.DeclarativeTableComponent; - private endpointsLoadingComponent: azdata.LoadingComponent; - private endpointsDisplayContainer: azdata.FlexContainer; - private serviceStatusLoadingComponent: azdata.LoadingComponent; - private serviceStatusDisplayContainer: azdata.FlexContainer; - private propertiesErrorMessage: azdata.TextComponent; - private endpointsErrorMessage: azdata.TextComponent; - private serviceStatusErrorMessage: azdata.TextComponent; - - constructor(model: BdcDashboardModel, modelView: azdata.ModelView, private dashboard: azdata.window.ModelViewDashboard) { - super(model, modelView); - this.model.onDidUpdateEndpoints(endpoints => this.eventuallyRunOnInitialized(() => this.handleEndpointsUpdate(endpoints))); - this.model.onDidUpdateBdcStatus(bdcStatus => this.eventuallyRunOnInitialized(() => this.handleBdcStatusUpdate(bdcStatus))); - this.model.onBdcError(error => this.eventuallyRunOnInitialized(() => this.handleBdcError(error))); - } - - public get container(): azdata.FlexContainer { - // Lazily create the container only when needed - if (!this.rootContainer) { - this.rootContainer = this.createContainer(); - } - return this.rootContainer; - } - - public createContainer(): azdata.FlexContainer { - const rootContainer = this.modelView.modelBuilder.flexContainer().withLayout( - { - flexFlow: 'column', - width: '100%', - height: '100%' - }).component(); - - // ############## - // # PROPERTIES # - // ############## - - const propertiesLabel = this.modelView.modelBuilder.text() - .withProps({ value: loc.clusterProperties, CSSStyles: { 'margin-block-start': '0px', 'margin-block-end': '10px' } }) - .component(); - rootContainer.addItem(propertiesLabel, { CSSStyles: { 'margin-top': '15px', 'padding-left': '10px', ...cssStyles.title } }); - - const propertiesContainer = this.modelView.modelBuilder.propertiesContainer().component(); - - this.propertiesContainerLoadingComponent = this.modelView.modelBuilder.loadingComponent().withItem(propertiesContainer).component(); - rootContainer.addItem(this.propertiesContainerLoadingComponent, { flex: '0 0 auto', CSSStyles: { 'padding-left': '10px' } }); - - // ############ - // # OVERVIEW # - // ############ - - const overviewHeaderContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'row', height: '20px' 
}).component(); - rootContainer.addItem(overviewHeaderContainer, { CSSStyles: { 'padding-left': '10px', 'padding-top': '15px' } }); - - const overviewLabel = this.modelView.modelBuilder.text() - .withProps({ - value: loc.clusterOverview, - CSSStyles: { ...cssStyles.text } - }) - .component(); - - overviewHeaderContainer.addItem(overviewLabel, { CSSStyles: { ...cssStyles.title } }); - - this.lastUpdatedLabel = this.modelView.modelBuilder.text() - .withProps({ - value: loc.lastUpdated(), - CSSStyles: { ...cssStyles.lastUpdatedText } - }).component(); - - overviewHeaderContainer.addItem(this.lastUpdatedLabel, { CSSStyles: { 'margin-left': '45px' } }); - - const overviewContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'column', width: '100%', height: '100%' }).component(); - - this.serviceStatusTable = this.modelView.modelBuilder.declarativeTable() - .withProps( - { - columns: [ - { // status icon - displayName: '', - ariaLabel: loc.statusIcon, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 25, - headerCssStyles: { - 'border': 'none' - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // service - displayName: loc.serviceName, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 175, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // state - displayName: loc.state, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 150, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // health status - displayName: loc.healthStatus, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - 'text-align': 'left', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - }, - { // view details button - displayName: '', - ariaLabel: loc.viewErrorDetails, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 150, - headerCssStyles: { - 'border': 'none', - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - ], - data: [], - ariaLabel: loc.clusterOverview - }) - .component(); - - this.serviceStatusDisplayContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'column' }).component(); - this.serviceStatusDisplayContainer.addItem(this.serviceStatusTable); - - // Note we don't make the table a child of the loading component since making the loading component align correctly - // messes up the layout for the table that we display after loading is finished. 
Instead we'll just remove the loading - // component once it's finished loading the content - this.serviceStatusLoadingComponent = this.modelView.modelBuilder.loadingComponent() - .withProps({ CSSStyles: { 'padding-top': '0px', 'padding-bottom': '0px' } }) - .component(); - - this.serviceStatusDisplayContainer.addItem(this.serviceStatusLoadingComponent, { flex: '0 0 auto', CSSStyles: { 'padding-left': '150px', width: '30px' } }); - - this.serviceStatusErrorMessage = this.modelView.modelBuilder.text().withProps({ display: 'none', CSSStyles: { ...cssStyles.errorText } }).component(); - overviewContainer.addItem(this.serviceStatusErrorMessage); - - overviewContainer.addItem(this.serviceStatusDisplayContainer); - - rootContainer.addItem(overviewContainer, { flex: '0 0 auto' }); - - // ##################### - // # SERVICE ENDPOINTS # - // ##################### - - const endpointsLabel = this.modelView.modelBuilder.text() - .withProps({ value: loc.serviceEndpoints, CSSStyles: { 'margin-block-start': '20px', 'margin-block-end': '0px' } }) - .component(); - rootContainer.addItem(endpointsLabel, { CSSStyles: { 'padding-left': '10px', ...cssStyles.title } }); - - this.endpointsErrorMessage = this.modelView.modelBuilder.text().withProps({ display: 'none', CSSStyles: { ...cssStyles.errorText } }).component(); - - const endpointsContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'column', width: '100%', height: '100%' }).component(); - - this.endpointsTable = this.modelView.modelBuilder.declarativeTable() - .withProps( - { - columns: [ - { // service - displayName: loc.service, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 200, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // endpoint - displayName: loc.endpoint, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 350, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none', - 'overflow': 'hidden', - 'text-overflow': 'ellipsis' - }, - }, - { // copy - displayName: '', - ariaLabel: loc.copy, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 50, - headerCssStyles: { - 'border': 'none', - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - } - ], - data: [], - ariaLabel: loc.serviceEndpoints - }).component(); - - this.endpointsDisplayContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'column' }).component(); - this.endpointsDisplayContainer.addItem(this.endpointsTable); - - // Note we don't make the table a child of the loading component since making the loading component align correctly - // messes up the layout for the table that we display after loading is finished. 
Instead we'll just remove the loading - // component once it's finished loading the content - this.endpointsLoadingComponent = this.modelView.modelBuilder.loadingComponent() - .withProps({ CSSStyles: { 'padding-top': '0px', 'padding-bottom': '0px' } }) - .component(); - this.endpointsDisplayContainer.addItem(this.endpointsLoadingComponent, { flex: '0 0 auto', CSSStyles: { 'padding-left': '150px', width: '30px' } }); - - endpointsContainer.addItem(this.endpointsErrorMessage); - endpointsContainer.addItem(this.endpointsDisplayContainer); - rootContainer.addItem(endpointsContainer, { flex: '0 0 auto' }); - - this.initialized = true; - - // Now that we've created the UI load data from the model in case it already had data - this.handleEndpointsUpdate(this.model.serviceEndpoints); - this.handleBdcStatusUpdate(this.model.bdcStatus); - - return rootContainer; - } - - public onRefreshStarted(): void { - this.propertiesErrorMessage.display = 'none'; - this.serviceStatusErrorMessage.display = 'none'; - this.endpointsErrorMessage.display = 'none'; - - this.serviceStatusDisplayContainer.display = undefined; - this.propertiesContainerLoadingComponent.display = undefined; - this.endpointsDisplayContainer.display = undefined; - } - - private handleBdcStatusUpdate(bdcStatus?: BdcStatusModel): void { - if (!bdcStatus) { - return; - } - this.lastUpdatedLabel.value = loc.lastUpdated(this.model.bdcStatusLastUpdated); - - this.propertiesContainerLoadingComponent.loading = false; - - (this.propertiesContainerLoadingComponent.component).propertyItems = [ - { displayName: loc.clusterState, value: getStateDisplayText(bdcStatus.state) }, - { displayName: loc.healthStatus, value: getHealthStatusDisplayText(bdcStatus.healthStatus) } - ]; - - if (bdcStatus.services) { - this.serviceStatusTable.data = bdcStatus.services.map(serviceStatus => { - const statusIconCell = this.modelView.modelBuilder.text() - .withProps({ - value: getHealthStatusIcon(serviceStatus.healthStatus), - ariaRole: 'img', - title: getHealthStatusDisplayText(serviceStatus.healthStatus), - CSSStyles: { 'user-select': 'none', ...cssStyles.text } - }).component(); - const nameCell = this.modelView.modelBuilder.hyperlink() - .withProps({ - label: getServiceNameDisplayText(serviceStatus.serviceName), - url: '', - CSSStyles: { ...cssStyles.text } - }).component(); - nameCell.onDidClick(() => { - this.dashboard.selectTab(serviceStatus.serviceName); - }); - - const viewDetailsButton = serviceStatus.healthStatus !== 'healthy' && serviceStatus.details && serviceStatus.details.length > 0 ? createViewDetailsButton(this.modelView.modelBuilder, serviceStatus.details) : undefined; - return [ - statusIconCell, - nameCell, - getStateDisplayText(serviceStatus.state), - getHealthStatusDisplayText(serviceStatus.healthStatus), - viewDetailsButton]; - }); - this.serviceStatusDisplayContainer.removeItem(this.serviceStatusLoadingComponent); - } - } - - private handleEndpointsUpdate(endpoints?: EndpointModel[]): void { - if (!endpoints) { - return; - } - // Sort the endpoints. 
The sort method is that SQL Server Master is first - followed by all - // others in alphabetical order by endpoint - const sqlServerMasterEndpoints = endpoints.filter(e => e.name === Endpoint.sqlServerMaster); - endpoints = endpoints.filter(e => e.name !== Endpoint.sqlServerMaster) - .sort((e1, e2) => { - if (e1.endpoint < e2.endpoint) { return -1; } - if (e1.endpoint > e2.endpoint) { return 1; } - return 0; - }); - endpoints.unshift(...sqlServerMasterEndpoints); - - this.endpointsTable.dataValues = endpoints.map(e => { - const copyValueCell = this.modelView.modelBuilder.button().withProps({ title: loc.copy }).component(); - copyValueCell.iconPath = IconPathHelper.copy; - copyValueCell.onDidClick(() => { - vscode.env.clipboard.writeText(e.endpoint); - vscode.window.showInformationMessage(loc.copiedEndpoint(getEndpointDisplayText(e.name, e.description))); - }); - return [{ value: getEndpointDisplayText(e.name, e.description) }, - { value: createEndpointComponent(this.modelView.modelBuilder, e, this.model, hyperlinkedEndpoints.some(he => he === e.name)) }, - { value: copyValueCell }]; - }); - - this.endpointsDisplayContainer.removeItem(this.endpointsLoadingComponent); - } - - private handleBdcError(errorEvent: BdcErrorEvent): void { - if (errorEvent.errorType === 'bdcEndpoints') { - const errorMessage = loc.endpointsError(errorEvent.error.message); - this.showEndpointsError(errorMessage); - } else if (errorEvent.errorType === 'bdcStatus') { - this.showBdcStatusError(getBdcStatusErrorMessage(errorEvent.error)); - } else { - this.handleGeneralError(errorEvent.error); - } - } - - private showBdcStatusError(errorMessage: string): void { - this.serviceStatusDisplayContainer.display = 'none'; - this.propertiesContainerLoadingComponent.display = 'none'; - this.serviceStatusErrorMessage.value = errorMessage; - this.serviceStatusErrorMessage.display = undefined; - this.propertiesErrorMessage.value = errorMessage; - this.propertiesErrorMessage.display = undefined; - } - - private showEndpointsError(errorMessage: string): void { - this.endpointsDisplayContainer.display = 'none'; - this.endpointsErrorMessage.display = undefined; - this.endpointsErrorMessage.value = errorMessage; - } - - private handleGeneralError(error: Error): void { - if (error instanceof HdfsDialogCancelledError) { - const errorMessage = loc.noConnectionError; - this.showBdcStatusError(errorMessage); - this.showEndpointsError(errorMessage); - } else { - const errorMessage = loc.unexpectedError(error); - this.showBdcStatusError(errorMessage); - this.showEndpointsError(errorMessage); - } - } -} - -function createEndpointComponent(modelBuilder: azdata.ModelBuilder, endpoint: EndpointModel, bdcModel: BdcDashboardModel, isHyperlink: boolean): azdata.HyperlinkComponent | azdata.TextComponent { - if (isHyperlink) { - return modelBuilder.hyperlink() - .withProps({ - label: endpoint.endpoint, - title: endpoint.endpoint, - url: endpoint.endpoint - }) - .component(); - } - else if (endpoint.name === Endpoint.sqlServerMaster) { - const endpointCell = modelBuilder.hyperlink() - .withProps({ - title: endpoint.endpoint, - label: endpoint.endpoint, - url: '', - CSSStyles: { ...cssStyles.text } - }).component(); - endpointCell.onDidClick(async () => { - const connProfile = bdcModel.getSqlServerMasterConnectionProfile(); - const result = await azdata.connection.connect(connProfile, true, true); - if (!result.connected) { - if (result.errorMessage && result.errorMessage.length > 0) { - vscode.window.showErrorMessage(result.errorMessage); - } - // Clear 
out the password and username before connecting since those being wrong are likely the issue - connProfile.userName = undefined; - connProfile.password = undefined; - azdata.connection.openConnectionDialog(undefined, connProfile); - } - }); - return endpointCell; - } - else { - return modelBuilder.text() - .withProps({ - value: endpoint.endpoint, - title: endpoint.endpoint, - CSSStyles: { ...cssStyles.text } - }) - .component(); - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardPage.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardPage.ts deleted file mode 100644 index 951ba88324..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardPage.ts +++ /dev/null @@ -1,70 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import { IconPathHelper } from '../constants'; -import { BdcDashboardModel, getTroubleshootNotebookUrl } from './bdcDashboardModel'; -import * as loc from '../localizedConstants'; -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { InitializingComponent } from './intializingComponent'; - -export abstract class BdcDashboardPage extends InitializingComponent { - - private _toolbarContainer: azdata.ToolbarContainer; - private _refreshButton: azdata.ButtonComponent; - - constructor(protected model: BdcDashboardModel, protected modelView: azdata.ModelView, protected serviceName?: string) { - super(); - } - - public get toolbarContainer(): azdata.ToolbarContainer { - // Lazily create the container only when needed - if (!this._toolbarContainer) { - this._toolbarContainer = this.createToolbarContainer(); - } - return this._toolbarContainer; - } - - protected createToolbarContainer(): azdata.ToolbarContainer { - // Refresh button - this._refreshButton = this.modelView.modelBuilder.button() - .withProps({ - label: loc.refresh, - iconPath: IconPathHelper.refresh - }).component(); - - this._refreshButton.onDidClick(async () => { - await this.doRefresh(); - }); - - const openTroubleshootNotebookButton = this.modelView.modelBuilder.button() - .withProps({ - label: loc.troubleshoot, - iconPath: IconPathHelper.notebook - }).component(); - - openTroubleshootNotebookButton.onDidClick(() => { - vscode.commands.executeCommand('books.sqlserver2019', getTroubleshootNotebookUrl(this.serviceName)); - }); - - return this.modelView.modelBuilder.toolbarContainer() - .withToolbarItems( - [ - { component: this._refreshButton }, - { component: openTroubleshootNotebookButton } - ] - ).component(); - } - - private async doRefresh(): Promise { - try { - this._refreshButton.enabled = false; - await this.model.refresh(); - } finally { - this._refreshButton.enabled = true; - } - } -} - diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardResourceStatusPage.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardResourceStatusPage.ts deleted file mode 100644 index 3b7c52eb89..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcDashboardResourceStatusPage.ts +++ /dev/null @@ -1,358 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. 
All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import { BdcDashboardModel } from './bdcDashboardModel'; -import { BdcStatusModel, InstanceStatusModel, ResourceStatusModel } from '../controller/apiGenerated'; -import { getHealthStatusDisplayText, getHealthStatusIcon, getStateDisplayText, Service } from '../utils'; -import { cssStyles } from '../constants'; -import { isNullOrUndefined } from 'util'; -import { createViewDetailsButton } from './commonControls'; -import { BdcDashboardPage } from './bdcDashboardPage'; -import * as loc from '../localizedConstants'; - -export class BdcDashboardResourceStatusPage extends BdcDashboardPage { - - private resourceStatusModel: ResourceStatusModel; - private rootContainer: azdata.FlexContainer; - private instanceHealthStatusTable: azdata.DeclarativeTableComponent; - private metricsAndLogsRowsTable: azdata.DeclarativeTableComponent; - private lastUpdatedLabel: azdata.TextComponent; - - constructor(model: BdcDashboardModel, modelView: azdata.ModelView, serviceName: string, private resourceName: string) { - super(model, modelView, serviceName); - this.model.onDidUpdateBdcStatus(bdcStatus => this.eventuallyRunOnInitialized(() => this.handleBdcStatusUpdate(bdcStatus))); - } - - public get container(): azdata.FlexContainer { - // Lazily create the container only when needed - if (!this.rootContainer) { - // We do this here so that we can have the resource model to use for populating the data - // in the tables. This is to get around a timing issue with ModelView tables - this.updateResourceStatusModel(this.model.bdcStatus); - this.createContainer(); - } - return this.rootContainer; - } - - private createContainer(): void { - this.rootContainer = this.modelView.modelBuilder.flexContainer().withLayout( - { - flexFlow: 'column', - width: '100%', - height: '100%' - }).component(); - - // ############################## - // # INSTANCE HEALTH AND STATUS # - // ############################## - - const healthStatusHeaderContainer = this.modelView.modelBuilder.flexContainer().withLayout({ flexFlow: 'row', height: '20px' }).component(); - this.rootContainer.addItem(healthStatusHeaderContainer, { CSSStyles: { 'padding-left': '10px', 'padding-top': '15px' } }); - - // Header label - const healthStatusHeaderLabel = this.modelView.modelBuilder.text() - .withProps({ - value: loc.healthStatusDetails, - CSSStyles: { 'margin-block-start': '0px', 'margin-block-end': '10px' } - }) - .component(); - - healthStatusHeaderContainer.addItem(healthStatusHeaderLabel, { CSSStyles: { ...cssStyles.title } }); - - // Last updated label - this.lastUpdatedLabel = this.modelView.modelBuilder.text() - .withProps({ - value: loc.lastUpdated(this.model.bdcStatusLastUpdated), - CSSStyles: { ...cssStyles.lastUpdatedText } - }).component(); - - healthStatusHeaderContainer.addItem(this.lastUpdatedLabel, { CSSStyles: { 'margin-left': '45px' } }); - - this.instanceHealthStatusTable = this.modelView.modelBuilder.declarativeTable() - .withProps( - { - columns: [ - { // status icon - displayName: '', - ariaLabel: loc.statusIcon, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 25, - headerCssStyles: { - 'border': 'none' - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - 
{ // instance - displayName: loc.instance, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // state - displayName: loc.state, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 150, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - { // health status - displayName: loc.healthStatus, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - 'text-align': 'left', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - }, - { // view details button - displayName: '', - ariaLabel: loc.viewErrorDetails, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 150, - headerCssStyles: { - 'border': 'none' - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - }, - }, - ], - data: this.createHealthStatusRows(), - ariaLabel: loc.healthStatusDetails - }).component(); - this.rootContainer.addItem(this.instanceHealthStatusTable, { flex: '0 0 auto' }); - - // #################### - // # METRICS AND LOGS # - // #################### - - // Title label - const endpointsLabel = this.modelView.modelBuilder.text() - .withProps({ value: loc.metricsAndLogs, CSSStyles: { 'margin-block-start': '20px', 'margin-block-end': '0px' } }) - .component(); - this.rootContainer.addItem(endpointsLabel, { CSSStyles: { 'padding-left': '10px', ...cssStyles.title } }); - - let metricsAndLogsColumns: azdata.DeclarativeTableColumn[] = - [ - { // instance - displayName: loc.instance, - valueType: azdata.DeclarativeDataType.string, - isReadOnly: true, - width: 125, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - }, - { // node metrics - displayName: loc.nodeMetrics, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - } - ]; - - // Only show SQL metrics column for SQL resource instances - if (this.serviceName.toLowerCase() === Service.sql) { - metricsAndLogsColumns.push( - { // sql metrics - displayName: loc.sqlMetrics, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - 'text-align': 'left', - ...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - }); - } - - metricsAndLogsColumns.push( - { // logs - displayName: loc.logs, - valueType: azdata.DeclarativeDataType.component, - isReadOnly: true, - width: 100, - headerCssStyles: { - 'border': 'none', - 'text-align': 'left', - 
...cssStyles.tableHeader - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none' - } - }); - - this.metricsAndLogsRowsTable = this.modelView.modelBuilder.declarativeTable() - .withProps( - { - columns: metricsAndLogsColumns, - data: this.createMetricsAndLogsRows(), - ariaLabel: loc.metricsAndLogs - }).component(); - this.rootContainer.addItem(this.metricsAndLogsRowsTable, { flex: '0 0 auto' }); - this.initialized = true; - } - - private updateResourceStatusModel(bdcStatus?: BdcStatusModel): void { - // If we can't find the resource model for this resource then just - // default to keeping what we had originally - if (!bdcStatus) { - return; - } - const service = bdcStatus.services ? bdcStatus.services.find(s => s.serviceName === this.serviceName) : undefined; - this.resourceStatusModel = service ? service.resources.find(r => r.resourceName === this.resourceName) : this.resourceStatusModel; - } - - private handleBdcStatusUpdate(bdcStatus?: BdcStatusModel): void { - this.updateResourceStatusModel(bdcStatus); - - if (!this.resourceStatusModel || isNullOrUndefined(this.resourceStatusModel.instances)) { - return; - } - - this.lastUpdatedLabel.value = loc.lastUpdated(this.model.bdcStatusLastUpdated); - - this.instanceHealthStatusTable.data = this.createHealthStatusRows(); - - this.metricsAndLogsRowsTable.data = this.createMetricsAndLogsRows(); - } - - private createMetricsAndLogsRows(): any[][] { - return this.resourceStatusModel ? this.resourceStatusModel.instances.map(instanceStatus => this.createMetricsAndLogsRow(instanceStatus)) : []; - } - - private createHealthStatusRows(): any[][] { - return this.resourceStatusModel ? this.resourceStatusModel.instances.map(instanceStatus => this.createHealthStatusRow(instanceStatus)) : []; - } - - private createMetricsAndLogsRow(instanceStatus: InstanceStatusModel): any[] { - const row: any[] = [instanceStatus.instanceName]; - - // Not all instances have all logs available - in that case just display N/A instead of a link - if (isNullOrUndefined(instanceStatus.dashboards) || isNullOrUndefined(instanceStatus.dashboards.nodeMetricsUrl)) { - row.push(this.modelView.modelBuilder.text().withProps({ value: loc.notAvailable, CSSStyles: { ...cssStyles.text } }).component()); - } else { - row.push(this.modelView.modelBuilder.hyperlink().withProps({ - label: loc.view, - url: instanceStatus.dashboards.nodeMetricsUrl, - title: instanceStatus.dashboards.nodeMetricsUrl, - ariaLabel: loc.viewNodeMetrics(instanceStatus.dashboards.nodeMetricsUrl), - CSSStyles: { ...cssStyles.text } - }).component()); - } - - // Only show SQL metrics column for SQL resource instances - if (this.serviceName === Service.sql) { - // Not all instances have all logs available - in that case just display N/A instead of a link - if (isNullOrUndefined(instanceStatus.dashboards) || isNullOrUndefined(instanceStatus.dashboards.sqlMetricsUrl)) { - row.push(this.modelView.modelBuilder.text().withProps({ value: loc.notAvailable, CSSStyles: { ...cssStyles.text } }).component()); - } else { - row.push(this.modelView.modelBuilder.hyperlink().withProps({ - label: loc.view, - url: instanceStatus.dashboards.sqlMetricsUrl, - title: instanceStatus.dashboards.sqlMetricsUrl, - ariaLabel: loc.viewSqlMetrics(instanceStatus.dashboards.sqlMetricsUrl), - CSSStyles: { ...cssStyles.text } - }).component()); - } - } - - if (isNullOrUndefined(instanceStatus.dashboards) || isNullOrUndefined(instanceStatus.dashboards.logsUrl)) { - 
row.push(this.modelView.modelBuilder.text().withProps({ value: loc.notAvailable, CSSStyles: { ...cssStyles.text } }).component()); - } else { - row.push(this.modelView.modelBuilder.hyperlink().withProps({ - label: loc.view, - url: instanceStatus.dashboards.logsUrl, - title: instanceStatus.dashboards.logsUrl, - ariaLabel: loc.viewLogs(instanceStatus.dashboards.logsUrl), - CSSStyles: { ...cssStyles.text } - }).component()); - } - return row; - } - - private createHealthStatusRow(instanceStatus: InstanceStatusModel): any[] { - const statusIconCell = this.modelView.modelBuilder.text() - .withProps({ - value: getHealthStatusIcon(instanceStatus.healthStatus), - ariaRole: 'img', - title: getHealthStatusDisplayText(instanceStatus.healthStatus), - CSSStyles: { 'user-select': 'none', ...cssStyles.text } - }).component(); - - const viewDetailsButton = instanceStatus.healthStatus !== 'healthy' && instanceStatus.details && instanceStatus.details.length > 0 ? createViewDetailsButton(this.modelView.modelBuilder, instanceStatus.details) : undefined; - return [ - statusIconCell, - instanceStatus.instanceName, - getStateDisplayText(instanceStatus.state), - getHealthStatusDisplayText(instanceStatus.healthStatus), - viewDetailsButton]; - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcServiceStatusPage.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcServiceStatusPage.ts deleted file mode 100644 index 118c2ab3de..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/dialog/bdcServiceStatusPage.ts +++ /dev/null @@ -1,72 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import { BdcStatusModel, ResourceStatusModel } from '../controller/apiGenerated';
-import { BdcDashboardResourceStatusPage } from './bdcDashboardResourceStatusPage';
-import { BdcDashboardModel } from './bdcDashboardModel';
-import { BdcDashboardPage } from './bdcDashboardPage';
-import { getHealthStatusDotIcon } from '../utils';
-
-export class BdcServiceStatusPage extends BdcDashboardPage {
-
-	private createdResourceTabs: Map<string, azdata.Tab> = new Map<string, azdata.Tab>();
-	private tabbedPanel: azdata.TabbedPanelComponent;
-
-	constructor(serviceName: string, model: BdcDashboardModel, modelView: azdata.ModelView) {
-		super(model, modelView, serviceName);
-		this.model.onDidUpdateBdcStatus(bdcStatus => this.eventuallyRunOnInitialized(() => this.handleBdcStatusUpdate(bdcStatus)));
-	}
-
-	public get container(): azdata.TabbedPanelComponent {
-		// Lazily create the container only when needed
-		if (!this.tabbedPanel) {
-			this.createPage();
-		}
-		return this.tabbedPanel;
-	}
-
-	private createPage(): void {
-		this.tabbedPanel = this.modelView.modelBuilder.tabbedPanel()
-			.withLayout({ showIcon: true, alwaysShowTabs: true }).component();
-
-		// Initialize our set of tab pages
-		this.handleBdcStatusUpdate(this.model.bdcStatus);
-
-		this.initialized = true;
-	}
-
-	private handleBdcStatusUpdate(bdcStatus: BdcStatusModel): void {
-		if (!bdcStatus) {
-			return;
-		}
-		const service = bdcStatus.services.find(s => s.serviceName === this.serviceName);
-		if (service && service.resources) {
-			this.updateResourcePages(service.resources);
-		}
-	}
-
-	/**
-	 * Update the resource tab pages, creating any new ones as necessary
-	 */
-	private updateResourcePages(resources: ResourceStatusModel[]): void {
-		resources.forEach(resource => {
-			const existingTab = this.createdResourceTabs.get(resource.resourceName);
-			if (existingTab) {
-				existingTab.icon = getHealthStatusDotIcon(resource.healthStatus);
-			} else {
-				const resourceStatusPage = new BdcDashboardResourceStatusPage(this.model, this.modelView, this.serviceName, resource.resourceName);
-				const newTab: azdata.Tab = {
-					title: resource.resourceName,
-					id: resource.resourceName,
-					content: resourceStatusPage.container,
-					icon: getHealthStatusDotIcon(resource.healthStatus)
-				};
-				this.createdResourceTabs.set(resource.resourceName, newTab);
-			}
-		});
-		this.tabbedPanel.updateTabs(Array.from(this.createdResourceTabs.values()));
-	}
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/commonControls.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/commonControls.ts
deleted file mode 100644
index 2b8515858c..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/commonControls.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
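
For illustration, a minimal sketch of how a service status page like the one above could be wired into the dashboard, mirroring the tab shape used by the dashboard's updateServicePages; the service name, health value, and the in-scope model/modelView variables are assumed:

	const page = new BdcServiceStatusPage('sql', model, modelView); // 'sql' is an assumed service name
	const tab: azdata.Tab = {
		title: getServiceNameDisplayText('sql'),
		id: 'sql',
		icon: getHealthStatusDotIcon('healthy'),  // assumed current health status
		content: page.container,                  // lazily builds the tabbed panel on first access
		toolbar: page.toolbarContainer            // refresh + troubleshoot buttons from BdcDashboardPage
	};
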
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import * as loc from '../localizedConstants';
-
-export function createViewDetailsButton(modelBuilder: azdata.ModelBuilder, text: string): azdata.ButtonComponent {
-	const viewDetailsButton = modelBuilder.button().withProps({
-		label: loc.viewDetails,
-		ariaLabel: loc.viewErrorDetails,
-		secondary: true
-	}).component();
-	viewDetailsButton.onDidClick(() => {
-		vscode.window.showErrorMessage(text, { modal: true });
-	});
-	return viewDetailsButton;
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/connectControllerDialog.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/connectControllerDialog.ts
deleted file mode 100644
index a7b99acfb4..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/connectControllerDialog.ts
+++ /dev/null
@@ -1,49 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import { HdfsDialogBase, HdfsDialogModelBase, HdfsDialogProperties } from './hdfsDialogBase';
-import { ClusterController } from '../controller/clusterControllerApi';
-import * as loc from '../localizedConstants';
-
-
-export class ConnectControllerDialog extends HdfsDialogBase<HdfsDialogProperties, ClusterController> {
-	constructor(model: ConnectControllerModel) {
-		super(loc.connectToController, model);
-	}
-
-	protected getMainSectionComponents(): (azdata.FormComponentGroup | azdata.FormComponent)[] {
-		return [];
-	}
-
-	protected async validate(): Promise<{ validated: boolean, value?: ClusterController }> {
-		try {
-			const controller = await this.model.onComplete({
-				url: this.urlInputBox && this.urlInputBox.value,
-				auth: this.authValue,
-				username: this.usernameInputBox && this.usernameInputBox.value,
-				password: this.passwordInputBox && this.passwordInputBox.value
-			});
-			return { validated: true, value: controller };
-		} catch (error) {
-			await this.reportError(error);
-			return { validated: false, value: undefined };
-		}
-	}
-}
-
-export class ConnectControllerModel extends HdfsDialogModelBase<HdfsDialogProperties, ClusterController> {
-
-	constructor(props: HdfsDialogProperties) {
-		super(props);
-	}
-
-	protected async handleCompleted(): Promise<ClusterController> {
-		this.throwIfMissingUsernamePassword();
-
-		// We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect)
-		return await this.createAndVerifyControllerConnection();
-	}
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/hdfsDialogBase.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/hdfsDialogBase.ts
deleted file mode 100644
index a92b17281e..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/hdfsDialogBase.ts
+++ /dev/null
@@ -1,240 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
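
For illustration, a minimal sketch of the connect-dialog contract defined above, run inside an async function; the controller URL and credentials are assumed values:

	try {
		const controller = await new ConnectControllerDialog(new ConnectControllerModel({
			url: 'https://controller:30080', // assumed controller endpoint for illustration
			auth: 'basic',
			username: 'admin',               // assumed credentials for illustration
			password: 'secret'
		})).showDialog();
		console.log(`Connected to controller at ${controller.url}`);
	} catch (err) {
		// showDialog rejects with HdfsDialogCancelledError when the dialog is dismissed
	}
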
- *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import { ClusterController, ControllerError } from '../controller/clusterControllerApi'; -import { Deferred } from '../../common/promise'; -import * as loc from '../localizedConstants'; -import { AuthType, IEndPointsResponse } from 'bdc'; - -function getAuthCategory(name: AuthType): azdata.CategoryValue { - if (name === 'basic') { - return { name: name, displayName: loc.basic }; - } - return { name: name, displayName: loc.windowsAuth }; -} - -export interface HdfsDialogProperties { - url?: string; - auth?: AuthType; - username?: string; - password?: string; -} - -export class HdfsDialogCancelledError extends Error { - constructor(message: string = 'Dialog cancelled') { - super(message); - } -} - -export abstract class HdfsDialogModelBase { - protected _canceled = false; - private _authTypes: azdata.CategoryValue[]; - constructor( - public props: T - ) { - if (!props.auth) { - this.props.auth = 'basic'; - } - } - - public get authCategories(): azdata.CategoryValue[] { - if (!this._authTypes) { - this._authTypes = [getAuthCategory('basic'), getAuthCategory('integrated')]; - } - return this._authTypes; - } - - public get authCategory(): azdata.CategoryValue { - return getAuthCategory(this.props.auth); - } - - public async onComplete(props: T): Promise { - try { - this.props = props; - return await this.handleCompleted(); - } catch (error) { - // Ignore the error if we cancelled the request since we can't stop the actual request from completing - if (!this._canceled) { - throw error; - } - return undefined; - } - } - - protected abstract handleCompleted(): Promise; - - public async onError(error: ControllerError): Promise { - // implement - } - - public async onCancel(): Promise { - this._canceled = true; - } - - protected createController(): ClusterController { - return new ClusterController(this.props.url, this.props.auth, this.props.username, this.props.password); - } - - protected async createAndVerifyControllerConnection(): Promise { - // We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect) - let controller = this.createController(); - let response: IEndPointsResponse; - try { - response = await controller.getEndPoints(); - if (!response || !response.endPoints) { - throw new Error(loc.loginFailed); - } - } catch (err) { - throw new Error(loc.loginFailedWithError(err)); - } - return controller; - } - - protected throwIfMissingUsernamePassword(): void { - if (this.props.auth === 'basic') { - // Verify username and password as we can't make them required in the UI - if (!this.props.username) { - throw new Error(loc.usernameRequired); - } else if (!this.props.password) { - throw new Error(loc.passwordRequired); - } - } - } -} - -export abstract class HdfsDialogBase { - - protected dialog: azdata.window.Dialog; - protected uiModelBuilder!: azdata.ModelBuilder; - - protected urlInputBox!: azdata.InputBoxComponent; - protected authDropdown!: azdata.DropDownComponent; - protected usernameInputBox!: azdata.InputBoxComponent; - protected passwordInputBox!: azdata.InputBoxComponent; - - private returnPromise: Deferred; - - constructor(private title: string, protected model: HdfsDialogModelBase) { - } - - public async showDialog(): Promise { - this.returnPromise = new Deferred(); - this.createDialog(); - azdata.window.openDialog(this.dialog); - return this.returnPromise.promise; - } - - private 
createDialog(): void { - this.dialog = azdata.window.createModelViewDialog(this.title); - this.dialog.registerContent(async view => { - this.uiModelBuilder = view.modelBuilder; - - this.urlInputBox = this.uiModelBuilder.inputBox() - .withProps({ - placeHolder: loc.url.toLocaleLowerCase(), - value: this.model.props.url, - enabled: false - }).component(); - - this.authDropdown = this.uiModelBuilder.dropDown().withProps({ - values: this.model.authCategories, - value: this.model.authCategory, - editable: false, - }).component(); - this.authDropdown.onValueChanged(e => this.onAuthChanged()); - this.usernameInputBox = this.uiModelBuilder.inputBox() - .withProps({ - placeHolder: loc.username.toLocaleLowerCase(), - value: this.model.props.username - }).component(); - this.passwordInputBox = this.uiModelBuilder.inputBox() - .withProps({ - placeHolder: loc.password.toLocaleLowerCase(), - inputType: 'password', - value: this.model.props.password - }) - .component(); - - let connectionSection: azdata.FormComponentGroup = { - components: [ - { - component: this.urlInputBox, - title: loc.clusterUrl, - required: true - }, { - component: this.authDropdown, - title: loc.authType, - required: true - }, { - component: this.usernameInputBox, - title: loc.username, - required: false - }, { - component: this.passwordInputBox, - title: loc.password, - required: false - } - ], - title: loc.clusterConnection - }; - let formModel = this.uiModelBuilder.formContainer() - .withFormItems( - this.getMainSectionComponents().concat( - connectionSection) - ).withLayout({ width: '100%' }).component(); - - await view.initializeModel(formModel); - this.onAuthChanged(); - }); - - this.dialog.registerCloseValidator(async () => { - const result = await this.validate(); - if (result.validated) { - this.returnPromise.resolve(result.value); - this.returnPromise = undefined; - } - return result.validated; - }); - this.dialog.cancelButton.onClick(async () => await this.cancel()); - this.dialog.okButton.label = loc.ok; - this.dialog.cancelButton.label = loc.cancel; - } - - protected abstract getMainSectionComponents(): (azdata.FormComponentGroup | azdata.FormComponent)[]; - - protected get authValue(): AuthType { - return (this.authDropdown.value).name as AuthType; - } - - private onAuthChanged(): void { - let isBasic = this.authValue === 'basic'; - this.usernameInputBox.enabled = isBasic; - this.passwordInputBox.enabled = isBasic; - if (!isBasic) { - this.usernameInputBox.value = ''; - this.passwordInputBox.value = ''; - } - } - - protected abstract validate(): Promise<{ validated: boolean, value?: R }>; - - private async cancel(): Promise { - if (this.model && this.model.onCancel) { - await this.model.onCancel(); - } - this.returnPromise.reject(new HdfsDialogCancelledError()); - } - - protected async reportError(error: any): Promise { - this.dialog.message = { - text: (typeof error === 'string') ? 
error : error.message,
-			level: azdata.window.MessageLevel.Error
-		};
-		if (this.model && this.model.onError) {
-			await this.model.onError(error as ControllerError);
-		}
-	}
-}
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/intializingComponent.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/intializingComponent.ts
deleted file mode 100644
index 046ebe7cf8..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/intializingComponent.ts
+++ /dev/null
@@ -1,40 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import { Deferred } from '../../common/promise';
-
-export abstract class InitializingComponent {
-
-	private _initialized: boolean = false;
-
-	private _onInitializedPromise: Deferred<void> = new Deferred<void>();
-
-	constructor() { }
-
-	protected get initialized(): boolean {
-		return this._initialized;
-	}
-
-	protected set initialized(value: boolean) {
-		if (!this._initialized && value) {
-			this._initialized = true;
-			this._onInitializedPromise.resolve();
-		}
-	}
-
-	/**
-	 * Runs the specified action when the component is initialized. If already initialized just runs
-	 * the action immediately.
-	 * @param action The action to be run when the page is initialized
-	 */
-	protected eventuallyRunOnInitialized(action: () => void): void {
-		if (!this._initialized) {
-			this._onInitializedPromise.promise.then(() => action()).catch(error => console.error(`Unexpected error running onInitialized action for BDC Page : ${error}`));
-		} else {
-			action();
-		}
-	}
-}
-
diff --git a/extensions/big-data-cluster/src/bigDataCluster/dialog/mountHdfsDialog.ts b/extensions/big-data-cluster/src/bigDataCluster/dialog/mountHdfsDialog.ts
deleted file mode 100644
index fa0a48eaf1..0000000000
--- a/extensions/big-data-cluster/src/bigDataCluster/dialog/mountHdfsDialog.ts
+++ /dev/null
@@ -1,378 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as vscode from 'vscode';
-import * as azdata from 'azdata';
-import { ClusterController, MountInfo, MountState } from '../controller/clusterControllerApi';
-import { HdfsDialogBase, HdfsDialogModelBase, HdfsDialogProperties } from './hdfsDialogBase';
-import * as loc from '../localizedConstants';
-
-/**
- * Converts a comma-delimited set of key value pair credentials to a JSON object.
- * This code is taken from the azdata implementation written in Python
- */
-function convertCredsToJson(creds: string): { credentials: {} } {
-	if (!creds) {
-		return undefined;
-	}
-	let credObj: { 'credentials': { [key: string]: any } } = { 'credentials': {} };
-	let pairs = creds.split(',');
-	let validPairs: string[] = [];
-	for (let i = 0; i < pairs.length; i++) {
-		// handle escaped commas in a browser-agnostic way using regex:
-		// this matches a string ending in a single escape character \, but not \\.
-		// In this case we split on ',` when we should've ignored it as it was a \, instead.
- // Restore the escaped comma by combining the 2 split strings - if (i < (pairs.length - 1) && pairs[i].match(/(?!\\).*\\$/)) { - pairs[i + 1] = `${pairs[i]},${pairs[i + 1]}`; - } else { - validPairs.push(pairs[i]); - } - } - - validPairs.forEach(pair => { - const formattingErr = loc.badCredentialsFormatting(pair); - try { - // # remove escaped characters for , - pair = pair.replace('\\,', ',').trim(); - let firstEquals = pair.indexOf('='); - if (firstEquals <= 0 || firstEquals >= pair.length) { - throw new Error(formattingErr); - } - let key = pair.substring(0, firstEquals); - let value = pair.substring(firstEquals + 1); - credObj.credentials[key] = value; - } catch (err) { - throw new Error(formattingErr); - } - }); - return credObj; -} - -export interface MountHdfsProperties extends HdfsDialogProperties { - hdfsPath?: string; - remoteUri?: string; - credentials?: string; -} - -export class MountHdfsDialogModel extends HdfsDialogModelBase { - private credentials: {}; - - constructor(props: MountHdfsProperties) { - super(props); - } - - protected async handleCompleted(): Promise { - this.throwIfMissingUsernamePassword(); - // Validate credentials - this.credentials = convertCredsToJson(this.props.credentials); - - // We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect) - let controller = await this.createAndVerifyControllerConnection(); - if (this._canceled) { - return; - } - azdata.tasks.startBackgroundOperation( - { - connection: undefined, - displayName: loc.mountTask(this.props.hdfsPath), - description: '', - isCancelable: false, - operation: op => { - this.onSubmit(controller, op); - } - } - ); - } - - private async onSubmit(controller: ClusterController, op: azdata.BackgroundOperation): Promise { - try { - await controller.mountHdfs(this.props.hdfsPath, this.props.remoteUri, this.credentials); - op.updateStatus(azdata.TaskStatus.InProgress, loc.mountTaskSubmitted); - - // Wait until status has changed or some sensible time expired. If it goes over 2 minutes we assume it's "working" - // as there's no other API that'll give us this for now - let result = await this.waitOnMountStatusChange(controller); - let msg = result.state === MountState.Ready ? loc.mountCompleted : loc.mountInProgress; - op.updateStatus(azdata.TaskStatus.Succeeded, msg); - } catch (error) { - const errMsg = loc.mountError(error); - vscode.window.showErrorMessage(errMsg); - op.updateStatus(azdata.TaskStatus.Failed, errMsg); - } - } - - private waitOnMountStatusChange(controller: ClusterController): Promise { - return new Promise((resolve, reject) => { - const waitTime = 5 * 1000; // 5 seconds - const maxRetries = 30; // 5 x 30 = 150 seconds. After this time, can assume things are "working" as 2 min timeout passed - let waitOnChange = async (retries: number) => { - try { - let mountInfo = await this.getMountStatus(controller, this.props.hdfsPath); - if (mountInfo && mountInfo.error || mountInfo.state === MountState.Error) { - reject(new Error(mountInfo.error ? 
mountInfo.error : loc.mountErrorUnknown)); - } else if (mountInfo.state === MountState.Ready || retries <= 0) { - resolve(mountInfo); - } else { - setTimeout(() => { - waitOnChange(retries - 1).catch(e => reject(e)); - }, waitTime); - } - } catch (err) { - reject(err); - } - }; - waitOnChange(maxRetries); - }); - } - - private async getMountStatus(controller: ClusterController, path: string): Promise { - let statusResponse = await controller.getMountStatus(path); - if (statusResponse.mount) { - return Array.isArray(statusResponse.mount) ? statusResponse.mount[0] : statusResponse.mount; - } - return undefined; - } -} - -export class MountHdfsDialog extends HdfsDialogBase { - private pathInputBox: azdata.InputBoxComponent; - private remoteUriInputBox: azdata.InputBoxComponent; - private credentialsInputBox: azdata.InputBoxComponent; - - constructor(model: MountHdfsDialogModel) { - super(loc.mountFolder, model); - } - - protected getMainSectionComponents(): (azdata.FormComponentGroup | azdata.FormComponent)[] { - const newMountName = '/mymount'; - let pathVal = this.model.props.hdfsPath; - pathVal = (!pathVal || pathVal === '/') ? newMountName : (pathVal + newMountName); - this.pathInputBox = this.uiModelBuilder.inputBox() - .withProps({ - value: pathVal - }).component(); - this.remoteUriInputBox = this.uiModelBuilder.inputBox() - .withProps({ - value: this.model.props.remoteUri - }) - .component(); - this.credentialsInputBox = this.uiModelBuilder.inputBox() - .withProps({ - inputType: 'password', - value: this.model.props.credentials - }) - .component(); - - return [ - { - components: [ - { - component: this.pathInputBox, - title: loc.hdfsPath, - required: true, - layout: { - info: loc.hdfsPathInfo - } - }, { - component: this.remoteUriInputBox, - title: loc.remoteUri, - required: true, - layout: { - info: loc.remoteUriInfo - } - }, { - component: this.credentialsInputBox, - title: loc.credentials, - required: false, - layout: { - info: loc.credentialsInfo - } - } - ], - title: loc.mountConfiguration - }]; - } - - protected async validate(): Promise<{ validated: boolean }> { - try { - await this.model.onComplete({ - url: this.urlInputBox && this.urlInputBox.value, - auth: this.authValue, - username: this.usernameInputBox && this.usernameInputBox.value, - password: this.passwordInputBox && this.passwordInputBox.value, - hdfsPath: this.pathInputBox && this.pathInputBox.value, - remoteUri: this.remoteUriInputBox && this.remoteUriInputBox.value, - credentials: this.credentialsInputBox && this.credentialsInputBox.value - }); - return { validated: true }; - } catch (error) { - await this.reportError(error); - return { validated: false }; - } - } -} - -export class RefreshMountDialog extends HdfsDialogBase { - private pathInputBox: azdata.InputBoxComponent; - - constructor(model: RefreshMountModel) { - super(loc.refreshMount, model); - } - - protected getMainSectionComponents(): (azdata.FormComponentGroup | azdata.FormComponent)[] { - this.pathInputBox = this.uiModelBuilder.inputBox() - .withProps({ - value: this.model.props.hdfsPath - }).component(); - return [ - { - components: [ - { - component: this.pathInputBox, - title: loc.hdfsPath, - required: true - } - ], - title: loc.mountConfiguration - }]; - } - - protected async validate(): Promise<{ validated: boolean }> { - try { - await this.model.onComplete({ - url: this.urlInputBox && this.urlInputBox.value, - auth: this.authValue, - username: this.usernameInputBox && this.usernameInputBox.value, - password: this.passwordInputBox && 
this.passwordInputBox.value, - hdfsPath: this.pathInputBox && this.pathInputBox.value - }); - return { validated: true }; - } catch (error) { - await this.reportError(error); - return { validated: false }; - } - } -} - -export class RefreshMountModel extends HdfsDialogModelBase { - - constructor(props: MountHdfsProperties) { - super(props); - } - - protected async handleCompleted(): Promise { - this.throwIfMissingUsernamePassword(); - - // We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect) - let controller = await this.createAndVerifyControllerConnection(); - if (this._canceled) { - return; - } - azdata.tasks.startBackgroundOperation( - { - connection: undefined, - displayName: loc.refreshMountTask(this.props.hdfsPath), - description: '', - isCancelable: false, - operation: op => { - this.onSubmit(controller, op); - } - } - ); - } - - private async onSubmit(controller: ClusterController, op: azdata.BackgroundOperation): Promise { - try { - await controller.refreshMount(this.props.hdfsPath); - op.updateStatus(azdata.TaskStatus.Succeeded, loc.refreshMountTaskSubmitted); - } catch (error) { - const errMsg = (error instanceof Error) ? error.message : error; - vscode.window.showErrorMessage(errMsg); - op.updateStatus(azdata.TaskStatus.Failed, errMsg); - } - } -} - -export class DeleteMountDialog extends HdfsDialogBase { - private pathInputBox: azdata.InputBoxComponent; - - constructor(model: DeleteMountModel) { - super(loc.deleteMount, model); - } - - protected getMainSectionComponents(): (azdata.FormComponentGroup | azdata.FormComponent)[] { - this.pathInputBox = this.uiModelBuilder.inputBox() - .withProps({ - value: this.model.props.hdfsPath - }).component(); - return [ - { - components: [ - { - component: this.pathInputBox, - title: loc.hdfsPath, - required: true - } - ], - title: loc.mountConfiguration - }]; - } - - protected async validate(): Promise<{ validated: boolean }> { - try { - await this.model.onComplete({ - url: this.urlInputBox && this.urlInputBox.value, - auth: this.authValue, - username: this.usernameInputBox && this.usernameInputBox.value, - password: this.passwordInputBox && this.passwordInputBox.value, - hdfsPath: this.pathInputBox && this.pathInputBox.value - }); - return { validated: true }; - } catch (error) { - await this.reportError(error); - return { validated: false }; - } - } -} - -export class DeleteMountModel extends HdfsDialogModelBase { - - constructor(props: MountHdfsProperties) { - super(props); - } - - protected async handleCompleted(): Promise { - this.throwIfMissingUsernamePassword(); - - // We pre-fetch the endpoints here to verify that the information entered is correct (the user is able to connect) - let controller = await this.createAndVerifyControllerConnection(); - if (this._canceled) { - return; - } - azdata.tasks.startBackgroundOperation( - { - connection: undefined, - displayName: loc.deleteMountTask(this.props.hdfsPath), - description: '', - isCancelable: false, - operation: op => { - this.onSubmit(controller, op); - } - } - ); - } - - private async onSubmit(controller: ClusterController, op: azdata.BackgroundOperation): Promise { - try { - await controller.deleteMount(this.props.hdfsPath); - op.updateStatus(azdata.TaskStatus.Succeeded, loc.deleteMountTaskSubmitted); - } catch (error) { - const errMsg = (error instanceof Error) ? 
error.message : error; - vscode.window.showErrorMessage(errMsg); - op.updateStatus(azdata.TaskStatus.Failed, errMsg); - } - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/localizedConstants.ts b/extensions/big-data-cluster/src/bigDataCluster/localizedConstants.ts deleted file mode 100644 index 902764f285..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/localizedConstants.ts +++ /dev/null @@ -1,91 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as nls from 'vscode-nls'; -import { ControllerError } from './controller/clusterControllerApi'; -const localize = nls.loadMessageBundle(); - -// Labels -export const statusIcon = localize('bdc.dashboard.status', "Status Icon"); -export const instance = localize('bdc.dashboard.instance', "Instance"); -export const state = localize('bdc.dashboard.state', "State"); -export const view = localize('bdc.dashboard.view', "View"); -export const notAvailable = localize('bdc.dashboard.notAvailable', "N/A"); -export const healthStatusDetails = localize('bdc.dashboard.healthStatusDetails', "Health Status Details"); -export const metricsAndLogs = localize('bdc.dashboard.metricsAndLogs', "Metrics and Logs"); -export const healthStatus = localize('bdc.dashboard.healthStatus', "Health Status"); -export const nodeMetrics = localize('bdc.dashboard.nodeMetrics', "Node Metrics"); -export const sqlMetrics = localize('bdc.dashboard.sqlMetrics', "SQL Metrics"); -export const logs = localize('bdc.dashboard.logs', "Logs"); -export function viewNodeMetrics(uri: string): string { return localize('bdc.dashboard.viewNodeMetrics', "View Node Metrics {0}", uri); } -export function viewSqlMetrics(uri: string): string { return localize('bdc.dashboard.viewSqlMetrics', "View SQL Metrics {0}", uri); } -export function viewLogs(uri: string): string { return localize('bdc.dashboard.viewLogs', "View Kibana Logs {0}", uri); } -export function lastUpdated(date?: Date): string { - return localize('bdc.dashboard.lastUpdated', "Last Updated : {0}", - date ? 
- `${date.toLocaleDateString()} ${date.toLocaleTimeString()}` - : '-'); -} -export const basic = localize('basicAuthName', "Basic"); -export const windowsAuth = localize('integratedAuthName', "Windows Authentication"); -export const addNewController = localize('addNewController', "Add New Controller"); -export const url = localize('url', "URL"); -export const username = localize('username', "Username"); -export const password = localize('password', "Password"); -export const rememberPassword = localize('rememberPassword', "Remember Password"); -export const clusterUrl = localize('clusterManagementUrl', "Cluster Management URL"); -export const authType = localize('textAuthCapital', "Authentication type"); -export const clusterConnection = localize('hdsf.dialog.connection.section', "Cluster Connection"); -export const add = localize('add', "Add"); -export const cancel = localize('cancel', "Cancel"); -export const ok = localize('ok', "OK"); -export const refresh = localize('bdc.dashboard.refresh', "Refresh"); -export const troubleshoot = localize('bdc.dashboard.troubleshoot', "Troubleshoot"); -export const bdcOverview = localize('bdc.dashboard.bdcOverview', "Big Data Cluster overview"); -export const clusterDetails = localize('bdc.dashboard.clusterDetails', "Cluster Details"); -export const clusterOverview = localize('bdc.dashboard.clusterOverview', "Cluster Overview"); -export const serviceEndpoints = localize('bdc.dashboard.serviceEndpoints', "Service Endpoints"); -export const clusterProperties = localize('bdc.dashboard.clusterProperties', "Cluster Properties"); -export const clusterState = localize('bdc.dashboard.clusterState', "Cluster State"); -export const serviceName = localize('bdc.dashboard.serviceName', "Service Name"); -export const service = localize('bdc.dashboard.service', "Service"); -export const endpoint = localize('bdc.dashboard.endpoint', "Endpoint"); -export function copiedEndpoint(endpointName: string): string { return localize('copiedEndpoint', "Endpoint '{0}' copied to clipboard", endpointName); } -export const copy = localize('bdc.dashboard.copy', "Copy"); -export const viewDetails = localize('bdc.dashboard.viewDetails', "View Details"); -export const viewErrorDetails = localize('bdc.dashboard.viewErrorDetails', "View Error Details"); -export const connectToController = localize('connectController.dialog.title', "Connect to Controller"); -export const mountConfiguration = localize('mount.main.section', "Mount Configuration"); -export function mountTask(path: string): string { return localize('mount.task.name', "Mounting HDFS folder on path {0}", path); } -export function refreshMountTask(path: string): string { return localize('refreshmount.task.name', "Refreshing HDFS Mount on path {0}", path); } -export function deleteMountTask(path: string): string { return localize('deletemount.task.name', "Deleting HDFS Mount on path {0}", path); } -export const mountTaskSubmitted = localize('mount.task.submitted', "Mount creation has started"); -export const refreshMountTaskSubmitted = localize('refreshmount.task.submitted', "Refresh mount request submitted"); -export const deleteMountTaskSubmitted = localize('deletemount.task.submitted', "Delete mount request submitted"); -export const mountCompleted = localize('mount.task.complete', "Mounting HDFS folder is complete"); -export const mountInProgress = localize('mount.task.inprogress', "Mounting is likely to complete, check back later to verify"); -export const mountFolder = localize('mount.dialog.title', "Mount HDFS Folder"); 
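// A minimal usage sketch of the vscode-nls pattern used throughout this file (the path
// below is a hypothetical value): localize() resolves the key against the loaded message
// bundle and substitutes '{0}'-style placeholders with the trailing arguments.
//
//   import * as loc from './localizedConstants';
//   loc.mountTask('/data/sales');   // "Mounting HDFS folder on path /data/sales"
//   loc.lastUpdated();              // "Last Updated : -" (no date supplied)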
-export const hdfsPath = localize('mount.hdfsPath.title', "HDFS Path"); -export const hdfsPathInfo = localize('mount.hdfsPath.info', "Path to a new (non-existing) directory which you want to associate with the mount"); -export const remoteUri = localize('mount.remoteUri.title', "Remote URI"); -export const remoteUriInfo = localize('mount.remoteUri.info', "The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/"); -export const credentials = localize('mount.credentials.title', "Credentials"); -export const credentialsInfo = localize('mount.credentials.info', "Mount credentials for authentication to remote data source for reads"); -export const refreshMount = localize('refreshmount.dialog.title', "Refresh Mount"); -export const deleteMount = localize('deleteMount.dialog.title', "Delete Mount"); -export const loadingClusterStateCompleted = localize('bdc.dashboard.loadingClusterStateCompleted', "Loading cluster state completed"); -export const loadingHealthStatusCompleted = localize('bdc.dashboard.loadingHealthStatusCompleted', "Loading health status completed"); - -// Errors -export const usernameRequired = localize('err.controller.username.required', "Username is required"); -export const passwordRequired = localize('err.controller.password.required', "Password is required"); -export function endpointsError(msg: string): string { return localize('endpointsError', "Unexpected error retrieving BDC Endpoints: {0}", msg); } -export const noConnectionError = localize('bdc.dashboard.noConnection', "The dashboard requires a connection. Please click retry to enter your credentials."); -export function unexpectedError(error: Error): string { return localize('bdc.dashboard.unexpectedError', "Unexpected error occurred: {0}", error.message); } -export const loginFailed = localize('mount.hdfs.loginerror1', "Login to controller failed"); -export function loginFailedWithError(error: ControllerError): string { return localize('mount.hdfs.loginerror2', "Login to controller failed: {0}", error.statusMessage || error.message); } -export function badCredentialsFormatting(pair: string): string { return localize('mount.err.formatting', "Bad formatting of credentials at {0}", pair); } -export function mountError(error: any): string { return localize('mount.task.error', "Error mounting folder: {0}", (error instanceof Error ? error.message : error)); } -export const mountErrorUnknown = localize('mount.error.unknown', "Unknown error occurred during the mount process"); diff --git a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeChangeHandler.ts b/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeChangeHandler.ts deleted file mode 100644 index a220148d94..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeChangeHandler.ts +++ /dev/null @@ -1,10 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -import { TreeNode } from './treeNode'; - -export interface IControllerTreeChangeHandler { - notifyNodeChanged(node?: TreeNode): void; -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeDataProvider.ts b/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeDataProvider.ts deleted file mode 100644 index b6111d0b6d..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeDataProvider.ts +++ /dev/null @@ -1,209 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import * as nls from 'vscode-nls'; -import { TreeNode } from './treeNode'; -import { IControllerTreeChangeHandler } from './controllerTreeChangeHandler'; -import { ControllerRootNode, ControllerNode } from './controllerTreeNode'; -import { showErrorMessage } from '../utils'; -import { AuthType } from 'bdc'; - -const localize = nls.loadMessageBundle(); - -const CredentialNamespace = 'clusterControllerCredentials'; - -interface IControllerInfoSlim { - url: string; - auth: AuthType; - username: string; - password?: string; - rememberPassword: boolean; -} - -export class ControllerTreeDataProvider implements vscode.TreeDataProvider, IControllerTreeChangeHandler { - - private _onDidChangeTreeData: vscode.EventEmitter = new vscode.EventEmitter(); - public readonly onDidChangeTreeData: vscode.Event = this._onDidChangeTreeData.event; - private root: ControllerRootNode; - private credentialProvider: azdata.CredentialProvider; - private initialized: boolean = false; - - constructor(private memento: vscode.Memento) { - this.root = new ControllerRootNode(this); - } - - public async getChildren(element?: TreeNode): Promise { - if (element) { - return element.getChildren(); - } - - if (!this.initialized) { - try { - await this.loadSavedControllers(); - } catch (err) { - void vscode.window.showErrorMessage(localize('bdc.controllerTreeDataProvider.error', "Unexpected error loading saved controllers: {0}", err)); - } - } - - return this.root.getChildren(); - } - - public getTreeItem(element: TreeNode): vscode.TreeItem | Thenable { - return element.getTreeItem(); - } - - public notifyNodeChanged(node?: TreeNode): void { - this._onDidChangeTreeData.fire(node); - } - - /** - * Creates or updates a node in the tree with the specified connection information - * @param url The URL for the BDC management endpoint - * @param auth The type of auth to use - * @param username The username (if basic auth) - * @param password The password (if basic auth) - * @param rememberPassword Whether to store the password in the password store when saving - */ - public addOrUpdateController( - url: string, - auth: AuthType, - username: string, - password: string, - rememberPassword: boolean - ): void { - this.removeNonControllerNodes(); - this.root.addOrUpdateControllerNode(url, auth, username, password, rememberPassword); - this.notifyNodeChanged(); - } - - public removeController(url: string, auth: AuthType, username: string): ControllerNode[] { - let removed = this.root.removeControllerNode(url, auth, username); - if (removed) { 
- this.notifyNodeChanged(); - } - return removed; - } - - private removeNonControllerNodes(): void { - this.removeDefectiveControllerNodes(this.root.children); - } - - private removeDefectiveControllerNodes(nodes: TreeNode[]): void { - if (nodes.length > 0) { - for (let i = 0; i < nodes.length; ++i) { - if (nodes[i] instanceof ControllerNode) { - let controller = nodes[i] as ControllerNode; - if (!controller.url || !controller.id) { - nodes.splice(i--, 1); - } - } - } - } - } - - private async loadSavedControllers(): Promise { - // Optimistically set to true so we don't double-load the tree - this.initialized = true; - try { - let controllers: IControllerInfoSlim[] = this.memento.get('controllers'); - let treeNodes: TreeNode[] = []; - if (controllers) { - for (const c of controllers) { - let password = undefined; - if (c.rememberPassword) { - password = await this.getPassword(c.url, c.username); - } - if (!c.auth) { - // Added before we had added authentication - c.auth = 'basic'; - } - treeNodes.push(new ControllerNode( - c.url, c.auth, c.username, password, c.rememberPassword, - undefined, this.root, this, undefined - )); - } - this.removeDefectiveControllerNodes(treeNodes); - } - - this.root.clearChildren(); - treeNodes.forEach(node => this.root.addChild(node)); - await vscode.commands.executeCommand('setContext', 'bdc.loaded', true); - } catch (err) { - // Reset so we can try again if the tree refreshes - this.initialized = false; - throw err; - } - } - - public async saveControllers(): Promise { - const controllers = this.root.children.map((e): IControllerInfoSlim => { - const controller = e as ControllerNode; - return { - url: controller.url, - auth: controller.auth, - username: controller.username, - password: controller.password, - rememberPassword: controller.rememberPassword - }; - }); - - const controllersWithoutPassword = controllers.map((e): IControllerInfoSlim => { - return { - url: e.url, - auth: e.auth, - username: e.username, - rememberPassword: e.rememberPassword - }; - }); - - try { - await this.memento.update('controllers', controllersWithoutPassword); - } catch (error) { - showErrorMessage(error); - } - - for (const e of controllers) { - if (e.rememberPassword) { - await this.savePassword(e.url, e.username, e.password); - } else { - await this.deletePassword(e.url, e.username); - } - } - } - - private async savePassword(url: string, username: string, password: string): Promise { - let provider = await this.getCredentialProvider(); - let id = this.createId(url, username); - let result = await provider.saveCredential(id, password); - return result; - } - - private async deletePassword(url: string, username: string): Promise { - let provider = await this.getCredentialProvider(); - let id = this.createId(url, username); - let result = await provider.deleteCredential(id); - return result; - } - - private async getPassword(url: string, username: string): Promise { - let provider = await this.getCredentialProvider(); - let id = this.createId(url, username); - let credential = await provider.readCredential(id); - return credential ? 
credential.password : undefined; - } - - private async getCredentialProvider(): Promise { - if (!this.credentialProvider) { - this.credentialProvider = await azdata.credentials.getProvider(CredentialNamespace); - } - return this.credentialProvider; - } - - private createId(url: string, username: string): string { - return `${url}::${username}`; - } -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeNode.ts b/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeNode.ts deleted file mode 100644 index f34e41aa21..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/tree/controllerTreeNode.ts +++ /dev/null @@ -1,247 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import { IControllerTreeChangeHandler } from './controllerTreeChangeHandler'; -import { TreeNode } from './treeNode'; -import { IconPathHelper, BdcItemType, IconPath } from '../constants'; -import { AuthType } from 'bdc'; - -abstract class ControllerTreeNode extends TreeNode { - - constructor( - label: string, - parent: ControllerTreeNode, - private _treeChangeHandler: IControllerTreeChangeHandler, - private _description?: string, - private _nodeType?: string, - private _iconPath?: IconPath - ) { - super(label, parent); - this._description = this._description || this.label; - } - - public async getChildren(): Promise { - return this.children as ControllerTreeNode[]; - } - - public override refresh(): void { - super.refresh(); - this.treeChangeHandler.notifyNodeChanged(this); - } - - public getTreeItem(): vscode.TreeItem { - let item: vscode.TreeItem = {}; - item.id = this.id; - item.label = this.label; - item.collapsibleState = vscode.TreeItemCollapsibleState.None; - item.iconPath = this._iconPath; - item.contextValue = this._nodeType; - item.tooltip = this._description; - item.iconPath = this._iconPath; - return item; - } - - public getNodeInfo(): azdata.NodeInfo { - return { - label: this.label, - isLeaf: this.isLeaf, - errorMessage: undefined, - metadata: undefined, - nodePath: this.nodePath, - nodeStatus: undefined, - nodeType: this._nodeType, - iconType: this._nodeType, - nodeSubType: undefined - }; - } - - public get description(): string { - return this._description; - } - - public set description(description: string) { - this._description = description; - } - - public get nodeType(): string { - return this._nodeType; - } - - public set nodeType(nodeType: string) { - this._nodeType = nodeType; - } - - public set iconPath(iconPath: IconPath) { - this._iconPath = iconPath; - } - - public get iconPath(): IconPath { - return this._iconPath; - } - - public set treeChangeHandler(treeChangeHandler: IControllerTreeChangeHandler) { - this._treeChangeHandler = treeChangeHandler; - } - - public get treeChangeHandler(): IControllerTreeChangeHandler { - return this._treeChangeHandler; - } -} - -export class ControllerRootNode extends ControllerTreeNode { - - constructor(treeChangeHandler: IControllerTreeChangeHandler) { - super('root', undefined, treeChangeHandler, undefined, BdcItemType.controllerRoot); - } - - public override async getChildren(): Promise { - return this.children as ControllerNode[]; - } - 
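// A minimal sketch of the refresh pattern that ControllerTreeDataProvider relies on;
// the generic parameters shown here are assumptions (they do not appear in the
// declarations above). Firing the event with undefined refreshes the whole tree;
// firing with a node refreshes just that subtree.
//
//   class ExampleProvider implements vscode.TreeDataProvider<TreeNode> {
//       private _onDidChangeTreeData = new vscode.EventEmitter<TreeNode | undefined>();
//       readonly onDidChangeTreeData = this._onDidChangeTreeData.event;
//       refresh(node?: TreeNode): void {
//           this._onDidChangeTreeData.fire(node);
//       }
//       getTreeItem(node: TreeNode): vscode.TreeItem { return node.getTreeItem(); }
//       getChildren(node?: TreeNode): vscode.ProviderResult<TreeNode[]> {
//           return node ? node.getChildren() : [];
//       }
//   }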
- /** - * Creates or updates a node in the tree with the specified connection information - * @param url The URL for the BDC management endpoint - * @param auth The type of auth to use - * @param username The username (if basic auth) - * @param password The password (if basic auth) - * @param rememberPassword Whether to store the password in the password store when saving - */ - public addOrUpdateControllerNode( - url: string, - auth: AuthType, - username: string, - password: string, - rememberPassword: boolean - ): void { - let controllerNode = this.getExistingControllerNode(url, auth, username); - if (controllerNode) { - controllerNode.password = password; - controllerNode.rememberPassword = rememberPassword; - controllerNode.clearChildren(); - } else { - controllerNode = new ControllerNode(url, auth, username, password, rememberPassword, undefined, this, this.treeChangeHandler, undefined); - this.addChild(controllerNode); - } - } - - public removeControllerNode(url: string, auth: AuthType, username: string): ControllerNode[] | undefined { - if (!url || (auth === 'basic' && !username)) { - return undefined; - } - let nodes = this.children as ControllerNode[]; - let index = nodes.findIndex(e => isControllerMatch(e, url, auth, username)); - let deleted: ControllerNode[] | undefined; - if (index >= 0) { - deleted = nodes.splice(index, 1); - } - return deleted; - } - - private getExistingControllerNode(url: string, auth: AuthType, username: string): ControllerNode | undefined { - if (!url || !username) { - return undefined; - } - let nodes = this.children as ControllerNode[]; - return nodes.find(e => isControllerMatch(e, url, auth, username)); - } -} - -export class ControllerNode extends ControllerTreeNode { - - constructor( - private _url: string, - private _auth: AuthType, - private _username: string, - private _password: string, - private _rememberPassword: boolean, - label: string, - parent: ControllerTreeNode, - treeChangeHandler: IControllerTreeChangeHandler, - description?: string, - ) { - super(label, parent, treeChangeHandler, description, BdcItemType.controller, IconPathHelper.controllerNode); - this.label = label; - this.description = description; - } - - public override async getChildren(): Promise { - if (this.children && this.children.length > 0) { - this.clearChildren(); - } - - if (!this._password) { - vscode.commands.executeCommand('bigDataClusters.command.connectController', this); - return this.children as ControllerTreeNode[]; - } - return undefined; - } - - public static toIpAndPort(url: string): string | undefined { - if (!url) { - return undefined; - } - return url.trim().replace(/ /g, '').replace(/^.+\:\/\//, ''); - } - - public get url(): string { - return this._url; - } - - - public get auth(): AuthType { - return this._auth; - } - - - public get username(): string { - return this._username; - } - - public get password(): string { - return this._password; - } - - public set password(pw: string) { - this._password = pw; - } - - public override set label(label: string) { - super.label = label || this.generateLabel(); - } - - public get rememberPassword() { - return this._rememberPassword; - } - - public set rememberPassword(rememberPassword: boolean) { - this._rememberPassword = rememberPassword; - } - - private generateLabel(): string { - let label = `controller: ${ControllerNode.toIpAndPort(this._url)}`; - if (this._auth === 'basic') { - label += ` (${this._username})`; - } - return label; - } - - public override get label(): string { - return super.label; - } - - 
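// Illustrative behavior of the helpers above (the URL and username are hypothetical
// values): toIpAndPort() trims the URL, strips spaces, and removes any protocol
// prefix, and generateLabel() appends the username only when basic auth is in use.
//
//   ControllerNode.toIpAndPort('https://10.0.0.5:30080');   // '10.0.0.5:30080'
//   // basic auth:      label === 'controller: 10.0.0.5:30080 (admin)'
//   // integrated auth: label === 'controller: 10.0.0.5:30080'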
public override set description(description: string) { - super.description = description || super.label; - } - - public override get description(): string { - return super.description; - } -} - -function isControllerMatch(node: ControllerNode, url: string, auth: string, username: string): unknown { - return node.url === url && node.auth === auth && node.username === username; -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/tree/treeNode.ts b/extensions/big-data-cluster/src/bigDataCluster/tree/treeNode.ts deleted file mode 100644 index 5fdccdc65e..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/tree/treeNode.ts +++ /dev/null @@ -1,193 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { generateGuid } from '../utils'; - -export abstract class TreeNode { - - private _id: string; - private _children: TreeNode[]; - private _isLeaf: boolean; - - constructor(private _label: string, private _parent?: TreeNode) { - this.resetId(); - } - - public resetId(): void { - this._id = (this._label || '_') + `::${generateGuid()}`; - } - - public get id(): string { - return this._id; - } - - public set label(label: string) { - if (!this._label) { - this._label = label; - this.resetId(); - } else { - this._label = label; - } - } - - public get label(): string { - return this._label; - } - - public set parent(parent: TreeNode) { - this._parent = parent; - } - - public get parent(): TreeNode { - return this._parent; - } - - public get children(): TreeNode[] { - if (!this._children) { - this._children = []; - } - return this._children; - } - - public get hasChildren(): boolean { - return this.children && this.children.length > 0; - } - - public set isLeaf(isLeaf: boolean) { - this._isLeaf = isLeaf; - } - - public get isLeaf(): boolean { - return this._isLeaf; - } - - public get root(): TreeNode { - return TreeNode.getRoot(this); - } - - public equals(node: TreeNode): boolean { - if (!node) { - return undefined; - } - return this.nodePath === node.nodePath; - } - - public refresh(): void { - this.resetId(); - } - - public static getRoot(node: TreeNode): TreeNode { - if (!node) { - return undefined; - } - let current: TreeNode = node; - while (current.parent) { - current = current.parent; - } - return current; - } - - public get nodePath(): string { - return TreeNode.getNodePath(this); - } - - public static getNodePath(node: TreeNode): string { - if (!node) { - return undefined; - } - - let current: TreeNode = node; - let path = current._id; - while (current.parent) { - current = current.parent; - path = `${current._id}/${path}`; - } - return path; - } - - public async findNode(condition: (node: TreeNode) => boolean, expandIfNeeded?: boolean): Promise { - return TreeNode.findNode(this, condition, expandIfNeeded); - } - - public static async findNode(node: TreeNode, condition: (node: TreeNode) => boolean, expandIfNeeded?: boolean): Promise { - if (!node || !condition) { - return undefined; - } - let result: TreeNode = undefined; - let nodesToCheck: TreeNode[] = [node]; - while (nodesToCheck.length > 0) { - let current = nodesToCheck.shift(); - if (condition(current)) { - result = current; - break; - } 
- if (current.hasChildren) { - nodesToCheck = nodesToCheck.concat(current.children); - } else if (expandIfNeeded) { - let children = await current.getChildren(); - if (children && children.length > 0) { - nodesToCheck = nodesToCheck.concat(children); - } - } - } - return result; - } - - public async filterNode(condition: (node: TreeNode) => boolean, expandIfNeeded?: boolean): Promise { - return TreeNode.filterNode(this, condition, expandIfNeeded); - } - - public static async filterNode(node: TreeNode, condition: (node: TreeNode) => boolean, expandIfNeeded?: boolean): Promise { - if (!node || !condition) { - return undefined; - } - let result: TreeNode[] = []; - let nodesToCheck: TreeNode[] = [node]; - while (nodesToCheck.length > 0) { - let current = nodesToCheck.shift(); - if (condition(current)) { - result.push(current); - } - if (current.hasChildren) { - nodesToCheck = nodesToCheck.concat(current.children); - } else if (expandIfNeeded) { - let children = await current.getChildren(); - if (children && children.length > 0) { - nodesToCheck = nodesToCheck.concat(children); - } - } - } - return result; - } - - public async findNodeByPath(path: string, expandIfNeeded?: boolean): Promise { - return TreeNode.findNodeByPath(this, path, expandIfNeeded); - } - - public static async findNodeByPath(node: TreeNode, path: string, expandIfNeeded?: boolean): Promise { - return TreeNode.findNode(node, node => { - return node.nodePath && (node.nodePath === path || node.nodePath.startsWith(path)); - }, expandIfNeeded); - } - - public addChild(node: TreeNode): void { - if (!this._children) { - this._children = []; - } - this._children.push(node); - } - - public clearChildren(): void { - if (this._children) { - this._children = []; - } - } - - public abstract getChildren(): Promise; - public abstract getTreeItem(): vscode.TreeItem; - public abstract getNodeInfo(): azdata.NodeInfo; -} diff --git a/extensions/big-data-cluster/src/bigDataCluster/utils.ts b/extensions/big-data-cluster/src/bigDataCluster/utils.ts deleted file mode 100644 index 20ff0e81ee..0000000000 --- a/extensions/big-data-cluster/src/bigDataCluster/utils.ts +++ /dev/null @@ -1,289 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import * as nls from 'vscode-nls'; -import * as constants from './constants'; -const localize = nls.loadMessageBundle(); - -export enum Endpoint { - gateway = 'gateway', - sparkHistory = 'spark-history', - yarnUi = 'yarn-ui', - appProxy = 'app-proxy', - mgmtproxy = 'mgmtproxy', - managementProxy = 'management-proxy', - logsui = 'logsui', - metricsui = 'metricsui', - controller = 'controller', - sqlServerMaster = 'sql-server-master', - webhdfs = 'webhdfs', - livy = 'livy' -} - -export enum Service { - sql = 'sql', - hdfs = 'hdfs', - spark = 'spark', - control = 'control', - gateway = 'gateway', - app = 'app' -} - -export function generateGuid(): string { - let hexValues: string[] = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']; - let oct: string = ''; - let tmp: number; - for (let a: number = 0; a < 4; a++) { - tmp = (4294967296 * Math.random()) | 0; - oct += hexValues[tmp & 0xF] + - hexValues[tmp >> 4 & 0xF] + - hexValues[tmp >> 8 & 0xF] + - hexValues[tmp >> 12 & 0xF] + - hexValues[tmp >> 16 & 0xF] + - hexValues[tmp >> 20 & 0xF] + - hexValues[tmp >> 24 & 0xF] + - hexValues[tmp >> 28 & 0xF]; - } - let clockSequenceHi: string = hexValues[8 + (Math.random() * 4) | 0]; - return oct.substr(0, 8) + '-' + oct.substr(9, 4) + '-4' + oct.substr(13, 3) + '-' + clockSequenceHi + oct.substr(16, 3) + '-' + oct.substr(19, 12); -} - -export function showErrorMessage(error: any, prefixText?: string): void { - if (error) { - let text: string = prefixText || ''; - if (typeof error === 'string') { - text += error as string; - } else if (typeof error === 'object' && error !== null) { - text += error.message; - if (error.code && error.code > 0) { - text += ` (${error.code})`; - } - } else { - text += `${error}`; - } - vscode.window.showErrorMessage(text); - } -} - -/** - * Mappings of the different expected state values to their localized friendly names. 
- * These are defined in aris/projects/controller/src/Microsoft.SqlServer.Controller/StateMachines - */ -const stateToDisplayTextMap: { [key: string]: string } = { - // K8sScaledSetStateMachine - 'creating': localize('state.creating', "Creating"), - 'waiting': localize('state.waiting', "Waiting"), - 'ready': localize('state.ready', "Ready"), - 'deleting': localize('state.deleting', "Deleting"), - 'deleted': localize('state.deleted', "Deleted"), - 'applyingupgrade': localize('state.applyingUpgrade', "Applying Upgrade"), - 'upgrading': localize('state.upgrading', "Upgrading"), - 'applyingmanagedupgrade': localize('state.applyingmanagedupgrade', "Applying Managed Upgrade"), - 'managedupgrading': localize('state.managedUpgrading', "Managed Upgrading"), - 'rollback': localize('state.rollback', "Rollback"), - 'rollbackinprogress': localize('state.rollbackInProgress', "Rollback In Progress"), - 'rollbackcomplete': localize('state.rollbackComplete', "Rollback Complete"), - 'error': localize('state.error', "Error"), - - // BigDataClusterStateMachine - 'creatingsecrets': localize('state.creatingSecrets', "Creating Secrets"), - 'waitingforsecrets': localize('state.waitingForSecrets', "Waiting For Secrets"), - 'creatinggroups': localize('state.creatingGroups', "Creating Groups"), - 'waitingforgroups': localize('state.waitingForGroups', "Waiting For Groups"), - 'creatingresources': localize('state.creatingResources', "Creating Resources"), - 'waitingforresources': localize('state.waitingForResources', "Waiting For Resources"), - 'creatingkerberosdelegationsetup': localize('state.creatingKerberosDelegationSetup', "Creating Kerberos Delegation Setup"), - 'waitingforkerberosdelegationsetup': localize('state.waitingForKerberosDelegationSetup', "Waiting For Kerberos Delegation Setup"), - 'waitingfordeletion': localize('state.waitingForDeletion', "Waiting For Deletion"), - 'waitingforupgrade': localize('state.waitingForUpgrade', "Waiting For Upgrade"), - 'upgradePaused': localize('state.upgradePaused', "Upgrade Paused"), - - // Other - 'running': localize('state.running', "Running"), -}; - -/** - * Gets the localized text to display for a corresponding state - * @param state The state to get the display text for - */ -export function getStateDisplayText(state?: string): string { - state = state || ''; - return stateToDisplayTextMap[state.toLowerCase()] || state; -} - -/** - * Gets the localized text to display for a corresponding endpoint - * @param endpointName The endpoint name to get the display text for - * @param description The backup description to use if we don't have our own - */ -export function getEndpointDisplayText(endpointName?: string, description?: string): string { - endpointName = endpointName || ''; - switch (endpointName.toLowerCase()) { - case Endpoint.appProxy: - return localize('endpoint.appproxy', "Application Proxy"); - case Endpoint.controller: - return localize('endpoint.controller', "Cluster Management Service"); - case Endpoint.gateway: - return localize('endpoint.gateway', "Gateway to access HDFS files, Spark"); - case Endpoint.managementProxy: - return localize('endpoint.managementproxy', "Management Proxy"); - case Endpoint.mgmtproxy: - return localize('endpoint.mgmtproxy', "Management Proxy"); - case Endpoint.sqlServerMaster: - return localize('endpoint.sqlServerEndpoint', "SQL Server Master Instance Front-End"); - case Endpoint.metricsui: - return localize('endpoint.grafana', "Metrics Dashboard"); - case Endpoint.logsui: - return localize('endpoint.kibana', "Log Search 
Dashboard"); - case Endpoint.yarnUi: - return localize('endpoint.yarnHistory', "Spark Diagnostics and Monitoring Dashboard"); - case Endpoint.sparkHistory: - return localize('endpoint.sparkHistory', "Spark Jobs Management and Monitoring Dashboard"); - case Endpoint.webhdfs: - return localize('endpoint.webhdfs', "HDFS File System Proxy"); - case Endpoint.livy: - return localize('endpoint.livy', "Proxy for running Spark statements, jobs, applications"); - default: - // Default is to use the description if one was given, otherwise worst case just fall back to using the - // original endpoint name - return description && description.length > 0 ? description : endpointName; - } -} - -/** - * Gets the localized text to display for a corresponding service - * @param serviceName The service name to get the display text for - */ -export function getServiceNameDisplayText(serviceName?: string): string { - serviceName = serviceName || ''; - switch (serviceName.toLowerCase()) { - case Service.sql: - return localize('service.sql', "SQL Server"); - case Service.hdfs: - return localize('service.hdfs', "HDFS"); - case Service.spark: - return localize('service.spark', "Spark"); - case Service.control: - return localize('service.control', "Control"); - case Service.gateway: - return localize('service.gateway', "Gateway"); - case Service.app: - return localize('service.app', "App"); - default: - return serviceName; - } -} - -/** - * Gets the localized text to display for a corresponding health status - * @param healthStatus The health status to get the display text for - */ -export function getHealthStatusDisplayText(healthStatus?: string) { - healthStatus = healthStatus || ''; - switch (healthStatus.toLowerCase()) { - case 'healthy': - return localize('bdc.healthy', "Healthy"); - case 'unhealthy': - return localize('bdc.unhealthy', "Unhealthy"); - default: - return healthStatus; - } -} - -/** - * Returns the status icon for the corresponding health status - * @param healthStatus The status to check - */ -export function getHealthStatusIcon(healthStatus?: string): string { - healthStatus = healthStatus || ''; - switch (healthStatus.toLowerCase()) { - case 'healthy': - return '✔️'; - default: - // Consider all non-healthy status' as errors - return '⚠️'; - } -} - -/** - * Returns the status dot icon which will be a • for all non-healthy states - * @param healthStatus The status to check - */ -export function getHealthStatusDotIcon(healthStatus?: string): constants.IconPath { - healthStatus = healthStatus || ''; - switch (healthStatus.toLowerCase()) { - case 'healthy': - return constants.IconPathHelper.status_circle_blank; - default: - // Display status dot for all non-healthy status' - return constants.IconPathHelper.status_circle_red; - } -} - - -interface RawEndpoint { - serviceName: string; - description?: string; - endpoint?: string; - protocol?: string; - ipAddress?: string; - port?: number; -} - -interface IEndpoint { - serviceName: string; - description: string; - endpoint: string; - protocol: string; -} - -function getClusterEndpoints(serverInfo: azdata.ServerInfo): IEndpoint[] { - let endpoints: RawEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty]; - if (!endpoints || endpoints.length === 0) { return []; } - - return endpoints.map(e => { - // If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS - let endpoint = e.endpoint ? 
e.endpoint : `https://${e.ipAddress}:${e.port}`; - let updatedEndpoint: IEndpoint = { - serviceName: e.serviceName, - description: e.description, - endpoint: endpoint, - protocol: e.protocol - }; - return updatedEndpoint; - }); -} - -export function getControllerEndpoint(serverInfo: azdata.ServerInfo): string | undefined { - let endpoints = getClusterEndpoints(serverInfo); - if (endpoints) { - let index = endpoints.findIndex(ep => ep.serviceName.toLowerCase() === constants.controllerEndpointName.toLowerCase()); - if (index < 0) { return undefined; } - return endpoints[index].endpoint; - } - return undefined; -} - -export function getBdcStatusErrorMessage(error: Error): string { - return localize('endpointsError', "Unexpected error retrieving BDC Endpoints: {0}", error.message); -} - -const bdcConfigSectionName = 'bigDataCluster'; -const ignoreSslConfigName = 'ignoreSslVerification'; - -/** - * Retrieves the current setting for whether to ignore SSL verification errors - */ -export function getIgnoreSslVerificationConfigSetting(): boolean { - try { - const config = vscode.workspace.getConfiguration(bdcConfigSectionName); - return config.get(ignoreSslConfigName, true); - } catch (error) { - console.error(`Unexpected error retrieving ${bdcConfigSectionName}.${ignoreSslConfigName} setting : ${error}`); - } - return true; -} diff --git a/extensions/big-data-cluster/src/commands.ts b/extensions/big-data-cluster/src/commands.ts deleted file mode 100644 index af540ea899..0000000000 --- a/extensions/big-data-cluster/src/commands.ts +++ /dev/null @@ -1,13 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -export const ManageControllerCommand = 'bigDataClusters.command.manageController'; -export const CreateControllerCommand = 'bigDataClusters.command.createController'; -export const ConnectControllerCommand = 'bigDataClusters.command.connectController'; -export const RemoveControllerCommand = 'bigDataClusters.command.removeController'; -export const RefreshControllerCommand = 'bigDataClusters.command.refreshController'; -export const MountHdfsCommand = 'bigDataClusters.command.mount'; -export const RefreshMountCommand = 'bigDataClusters.command.refreshmount'; -export const DeleteMountCommand = 'bigDataClusters.command.deletemount'; diff --git a/extensions/big-data-cluster/src/common/promise.ts b/extensions/big-data-cluster/src/common/promise.ts deleted file mode 100644 index 68f39d4dcd..0000000000 --- a/extensions/big-data-cluster/src/common/promise.ts +++ /dev/null @@ -1,25 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/
-
-/**
- * Deferred promise
- */
-export class Deferred<T> {
-	promise: Promise<T>;
-	resolve: (value?: T | PromiseLike<T>) => void;
-	reject: (reason?: any) => void;
-	constructor() {
-		this.promise = new Promise<T>((resolve, reject) => {
-			this.resolve = resolve;
-			this.reject = reject;
-		});
-	}
-
-	then<TResult>(onfulfilled?: (value: T) => TResult | Thenable<TResult>, onrejected?: (reason: any) => TResult | Thenable<TResult>): Thenable<TResult>;
-	then<TResult>(onfulfilled?: (value: T) => TResult | Thenable<TResult>, onrejected?: (reason: any) => void): Thenable<TResult>;
-	then<TResult>(onfulfilled?: (value: T) => TResult | Thenable<TResult>, onrejected?: (reason: any) => TResult | Thenable<TResult>): Thenable<TResult> {
-		return this.promise.then(onfulfilled, onrejected);
-	}
-}
diff --git a/extensions/big-data-cluster/src/extension.ts b/extensions/big-data-cluster/src/extension.ts
deleted file mode 100644
index 699d48462e..0000000000
--- a/extensions/big-data-cluster/src/extension.ts
+++ /dev/null
@@ -1,225 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as vscode from 'vscode';
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-import { ControllerTreeDataProvider } from './bigDataCluster/tree/controllerTreeDataProvider';
-import { IconPathHelper } from './bigDataCluster/constants';
-import { TreeNode } from './bigDataCluster/tree/treeNode';
-import { AddControllerDialogModel, AddControllerDialog } from './bigDataCluster/dialog/addControllerDialog';
-import { ControllerNode } from './bigDataCluster/tree/controllerTreeNode';
-import { BdcDashboard } from './bigDataCluster/dialog/bdcDashboard';
-import { BdcDashboardModel, BdcDashboardOptions } from './bigDataCluster/dialog/bdcDashboardModel';
-import { MountHdfsDialogModel as MountHdfsModel, MountHdfsProperties, MountHdfsDialog, DeleteMountDialog, DeleteMountModel, RefreshMountDialog, RefreshMountModel } from './bigDataCluster/dialog/mountHdfsDialog';
-import { getControllerEndpoint } from './bigDataCluster/utils';
-import * as commands from './commands';
-import { HdfsDialogCancelledError } from './bigDataCluster/dialog/hdfsDialogBase';
-import { IExtension, AuthType, IClusterController } from 'bdc';
-import { ClusterController } from './bigDataCluster/controller/clusterControllerApi';
-
-const localize = nls.loadMessageBundle();
-
-const endpointNotFoundError = localize('mount.error.endpointNotFound', "Controller endpoint information was not found");
-
-let throttleTimers: { [key: string]: any } = {};
-
-export async function activate(extensionContext: vscode.ExtensionContext): Promise<IExtension> {
-	IconPathHelper.setExtensionContext(extensionContext);
-	await vscode.commands.executeCommand('setContext', 'bdc.loaded', false);
-	const treeDataProvider = new ControllerTreeDataProvider(extensionContext.globalState);
-	let controllers: any[] = extensionContext.globalState.get('controllers', []);
-	if (controllers.length > 0) {
-		const deprecationNoticeKey = 'bdc.deprecationNoticeShown';
-		const deprecationNoticeShown = extensionContext.globalState.get(deprecationNoticeKey, false);
-		if (!deprecationNoticeShown) {
-			void vscode.window.showWarningMessage(localize('bdc.deprecationWarning', 'The Big Data Cluster add-on is
being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340).')); - void extensionContext.globalState.update(deprecationNoticeKey, true); - } - } - vscode.window.registerTreeDataProvider('sqlBigDataCluster', treeDataProvider); - registerCommands(extensionContext, treeDataProvider); - return { - getClusterController(url: string, authType: AuthType, username?: string, password?: string): IClusterController { - return new ClusterController(url, authType, username, password); - } - }; -} - -export function deactivate() { -} - -function registerCommands(context: vscode.ExtensionContext, treeDataProvider: ControllerTreeDataProvider): void { - vscode.commands.registerCommand(commands.ConnectControllerCommand, (node?: TreeNode) => { - runThrottledAction(commands.ConnectControllerCommand, () => addBdcController(treeDataProvider, node)); - }); - - vscode.commands.registerCommand(commands.CreateControllerCommand, () => { - runThrottledAction(commands.CreateControllerCommand, () => vscode.commands.executeCommand('azdata.resource.deploy', 'sql-bdc', ['sql-bdc'])); - }); - - vscode.commands.registerCommand(commands.RemoveControllerCommand, async (node: TreeNode) => { - await deleteBdcController(treeDataProvider, node); - }); - - vscode.commands.registerCommand(commands.RefreshControllerCommand, (node: TreeNode) => { - if (!node) { - return; - } - treeDataProvider.notifyNodeChanged(node); - }); - - vscode.commands.registerCommand(commands.ManageControllerCommand, async (info: ControllerNode | BdcDashboardOptions, addOrUpdateController: boolean = false) => { - const title: string = `${localize('bdc.dashboard.title', "Big Data Cluster Dashboard -")} ${ControllerNode.toIpAndPort(info.url)}`; - if (addOrUpdateController) { - // The info may be wrong, but if it is then we'll prompt to reconnect when the dashboard is opened - // and update with the correct info then - treeDataProvider.addOrUpdateController( - info.url, - info.auth, - info.username, - info.password, - info.rememberPassword); - await treeDataProvider.saveControllers(); - } - const dashboard: BdcDashboard = new BdcDashboard(title, new BdcDashboardModel(info, treeDataProvider)); - await dashboard.showDashboard(); - }); - - vscode.commands.registerCommand(commands.MountHdfsCommand, e => mountHdfs(e).catch(error => { - vscode.window.showErrorMessage(error instanceof Error ? error.message : error); - })); - vscode.commands.registerCommand(commands.RefreshMountCommand, e => refreshMount(e).catch(error => { - vscode.window.showErrorMessage(error instanceof Error ? error.message : error); - })); - vscode.commands.registerCommand(commands.DeleteMountCommand, e => deleteMount(e).catch(error => { - vscode.window.showErrorMessage(error instanceof Error ? 
error.message : error); - })); -} - -async function mountHdfs(explorerContext?: azdata.ObjectExplorerContext): Promise { - const mountProps = await getMountProps(explorerContext); - if (mountProps) { - const dialog = new MountHdfsDialog(new MountHdfsModel(mountProps)); - try { - await dialog.showDialog(); - } catch (error) { - if (!(error instanceof HdfsDialogCancelledError)) { - throw error; - } - } - - } -} - -async function refreshMount(explorerContext?: azdata.ObjectExplorerContext): Promise { - const mountProps = await getMountProps(explorerContext); - if (mountProps) { - const dialog = new RefreshMountDialog(new RefreshMountModel(mountProps)); - await dialog.showDialog(); - } -} - -async function deleteMount(explorerContext?: azdata.ObjectExplorerContext): Promise { - const mountProps = await getMountProps(explorerContext); - if (mountProps) { - const dialog = new DeleteMountDialog(new DeleteMountModel(mountProps)); - await dialog.showDialog(); - } -} - -async function getMountProps(explorerContext?: azdata.ObjectExplorerContext): Promise { - let endpoint = await lookupController(explorerContext); - if (!endpoint) { - vscode.window.showErrorMessage(endpointNotFoundError); - return undefined; - } - let profile = explorerContext.connectionProfile; - let mountProps: MountHdfsProperties = { - url: endpoint, - auth: profile.authenticationType === azdata.connection.AuthenticationType.SqlLogin ? 'basic' : 'integrated', - username: profile.userName, - password: profile.password, - hdfsPath: getHdsfPath(explorerContext.nodeInfo.nodePath) - }; - return mountProps; -} - -function getHdsfPath(nodePath: string): string { - const hdfsNodeLabel = '/HDFS'; - let index = nodePath.indexOf(hdfsNodeLabel); - if (index >= 0) { - let subPath = nodePath.substring(index + hdfsNodeLabel.length); - return subPath.length > 0 ? 
subPath : '/'; - } - // Use the root - return '/'; -} - -async function lookupController(explorerContext?: azdata.ObjectExplorerContext): Promise { - if (!explorerContext) { - return undefined; - } - - let serverInfo = await azdata.connection.getServerInfo(explorerContext.connectionProfile.id); - if (!serverInfo || !serverInfo.options) { - vscode.window.showErrorMessage(endpointNotFoundError); - return undefined; - } - return getControllerEndpoint(serverInfo); -} - -function addBdcController(treeDataProvider: ControllerTreeDataProvider, node?: TreeNode): void { - let model = new AddControllerDialogModel(treeDataProvider, node as ControllerNode); - let dialog = new AddControllerDialog(model); - dialog.showDialog(); -} - -async function deleteBdcController(treeDataProvider: ControllerTreeDataProvider, node: TreeNode): Promise { - if (!node && !(node instanceof ControllerNode)) { - return undefined; - } - - let controllerNode = node as ControllerNode; - - let choices: { [id: string]: boolean } = {}; - choices[localize('textYes', "Yes")] = true; - choices[localize('textNo', "No")] = false; - - let options = { - ignoreFocusOut: false, - placeHolder: localize('textConfirmRemoveController', "Are you sure you want to remove \'{0}\'?", controllerNode.label) - }; - - let result = await vscode.window.showQuickPick(Object.keys(choices), options); - let remove: boolean = !!(result && choices[result]); - if (remove) { - await removeControllerInternal(treeDataProvider, controllerNode); - } - return remove; -} - -async function removeControllerInternal(treeDataProvider: ControllerTreeDataProvider, controllerNode: ControllerNode): Promise { - const removed = treeDataProvider.removeController(controllerNode.url, controllerNode.auth, controllerNode.username); - if (removed) { - await treeDataProvider.saveControllers(); - } -} - -/** - * Throttles actions to avoid bug where on clicking in tree, action gets called twice - * instead of once. Any right-click action is safe, just the default on-click action in a tree - */ -function runThrottledAction(id: string, action: () => void) { - let timer = throttleTimers[id]; - if (!timer) { - throttleTimers[id] = timer = setTimeout(() => { - action(); - clearTimeout(timer); - throttleTimers[id] = undefined; - }, 150); - } - // else ignore this as we got an identical action in the last 150ms -} diff --git a/extensions/big-data-cluster/src/typings/refs.d.ts b/extensions/big-data-cluster/src/typings/refs.d.ts deleted file mode 100644 index 420c12b6ad..0000000000 --- a/extensions/big-data-cluster/src/typings/refs.d.ts +++ /dev/null @@ -1,9 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -/// -/// -/// -/// diff --git a/extensions/big-data-cluster/tsconfig.json b/extensions/big-data-cluster/tsconfig.json deleted file mode 100644 index 10d3ba40f1..0000000000 --- a/extensions/big-data-cluster/tsconfig.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "extends": "../tsconfig.base.json", - "compileOnSave": true, - "compilerOptions": { - "outDir": "./out", - "lib": [ - "es6", - "es2015.promise" - ], - "emitDecoratorMetadata": true, - "experimentalDecorators": true, - "moduleResolution": "node", - "declaration": false, - "typeRoots": [ - "./node_modules/@types" - ], - "strict": false, - "noUnusedParameters": false, - "strictNullChecks": false - }, - "exclude": [ - "node_modules" - ] -} diff --git a/extensions/big-data-cluster/yarn.lock b/extensions/big-data-cluster/yarn.lock deleted file mode 100644 index 3ffbb07538..0000000000 --- a/extensions/big-data-cluster/yarn.lock +++ /dev/null @@ -1,377 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. -# yarn lockfile v1 - - -"@microsoft/ads-kerberos@^1.1.3": - version "1.1.3" - resolved "https://registry.yarnpkg.com/@microsoft/ads-kerberos/-/ads-kerberos-1.1.3.tgz#a10c6d2d0751c0b67548d51a4bc45a7c33d00088" - integrity sha512-jlji4IUfkA/4idYBN9tyouCwurfyGZrDzMGlaUmx9/vtwRvdoEFCg959WfhShcfMPVLku/bztmLeZtN1ifccnQ== - dependencies: - nan "^2.14.0" - -"@types/caseless@*": - version "0.12.2" - resolved "https://registry.yarnpkg.com/@types/caseless/-/caseless-0.12.2.tgz#f65d3d6389e01eeb458bd54dc8f52b95a9463bc8" - integrity sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w== - -"@types/node@*": - version "12.12.6" - resolved "https://registry.yarnpkg.com/@types/node/-/node-12.12.6.tgz#a47240c10d86a9a57bb0c633f0b2e0aea9ce9253" - integrity sha512-FjsYUPzEJdGXjwKqSpE0/9QEh6kzhTAeObA54rn6j3rR4C/mzpI9L0KNfoeASSPMMdxIsoJuCLDWcM/rVjIsSA== - -"@types/request@^2.48.3": - version "2.48.3" - resolved "https://registry.yarnpkg.com/@types/request/-/request-2.48.3.tgz#970b8ed2317568c390361d29c555a95e74bd6135" - integrity sha512-3Wo2jNYwqgXcIz/rrq18AdOZUQB8cQ34CXZo+LUwPJNpvRAL86+Kc2wwI8mqpz9Cr1V+enIox5v+WZhy/p3h8w== - dependencies: - "@types/caseless" "*" - "@types/node" "*" - "@types/tough-cookie" "*" - form-data "^2.5.0" - -"@types/tough-cookie@*": - version "2.3.5" - resolved "https://registry.yarnpkg.com/@types/tough-cookie/-/tough-cookie-2.3.5.tgz#9da44ed75571999b65c37b60c9b2b88db54c585d" - integrity sha512-SCcK7mvGi3+ZNz833RRjFIxrn4gI1PPR3NtuIS+6vMkvmsGjosqTJwRt5bAEFLRz+wtJMWv8+uOnZf2hi2QXTg== - -ajv@^6.5.5: - version "6.12.6" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" - integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== - dependencies: - fast-deep-equal "^3.1.1" - fast-json-stable-stringify "^2.0.0" - json-schema-traverse "^0.4.1" - uri-js "^4.2.2" - -asn1@~0.2.3: - version "0.2.4" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" - integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg== - dependencies: - safer-buffer "~2.1.0" - -assert-plus@1.0.0, assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= - -asynckit@^0.4.0: - version 
"0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= - -aws-sign2@~0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= - -aws4@^1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" - integrity sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ== - -bcrypt-pbkdf@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= - dependencies: - tweetnacl "^0.14.3" - -caseless@~0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= - -combined-stream@^1.0.6, combined-stream@~1.0.6: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -core-util-is@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= - -dashdash@^1.12.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= - dependencies: - assert-plus "^1.0.0" - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= - -ecc-jsbn@~0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" - integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk= - dependencies: - jsbn "~0.1.0" - safer-buffer "^2.1.0" - -extend@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== - -extsprintf@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= - -extsprintf@^1.2.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" - integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= - -fast-deep-equal@^3.1.1: - version "3.1.3" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" - integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== - -fast-json-stable-stringify@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" - integrity 
sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== - -forever-agent@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= - -form-data@^2.5.0: - version "2.5.1" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.5.1.tgz#f2cbec57b5e59e23716e128fe44d4e5dd23895f4" - integrity sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types "^2.1.12" - -form-data@~2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" - integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types "^2.1.12" - -getpass@^0.1.1: - version "0.1.7" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= - dependencies: - assert-plus "^1.0.0" - -har-schema@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" - integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= - -har-validator@~5.1.0: - version "5.1.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" - integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== - dependencies: - ajv "^6.5.5" - har-schema "^2.0.0" - -http-signature@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" - integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE= - dependencies: - assert-plus "^1.0.0" - jsprim "^1.2.2" - sshpk "^1.7.0" - -is-typedarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= - -isstream@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= - -jsbn@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= - -json-schema-traverse@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" - integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== - -json-schema@0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" - integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== - -json-stringify-safe@~5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= - -jsprim@^1.2.2: - version "1.4.2" - resolved 
"https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.2.tgz#712c65533a15c878ba59e9ed5f0e26d5b77c5feb" - integrity sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw== - dependencies: - assert-plus "1.0.0" - extsprintf "1.3.0" - json-schema "0.4.0" - verror "1.10.0" - -mime-db@1.40.0: - version "1.40.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.40.0.tgz#a65057e998db090f732a68f6c276d387d4126c32" - integrity sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA== - -mime-types@^2.1.12, mime-types@~2.1.19: - version "2.1.24" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.24.tgz#b6f8d0b3e951efb77dedeca194cff6d16f676f81" - integrity sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ== - dependencies: - mime-db "1.40.0" - -nan@^2.14.0: - version "2.15.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.15.0.tgz#3f34a473ff18e15c1b5626b62903b5ad6e665fee" - integrity sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ== - -oauth-sign@~0.9.0: - version "0.9.0" - resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" - integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== - -performance-now@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" - integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= - -psl@^1.1.24: - version "1.1.32" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.1.32.tgz#3f132717cf2f9c169724b2b6caf373cf694198db" - integrity sha512-MHACAkHpihU/REGGPLj4sEfc/XKW2bheigvHO1dUqjaKigMp1C8+WLQYRGgeKFMsw5PMfegZcaN8IDXK/cD0+g== - -punycode@^1.4.1: - version "1.4.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" - integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= - -punycode@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" - integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== - -qs@~6.5.2: - version "6.5.3" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" - integrity sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA== - -request@^2.88.0: - version "2.88.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" - integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - har-validator "~5.1.0" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - oauth-sign "~0.9.0" - performance-now "^2.1.0" - qs "~6.5.2" - safe-buffer "^5.1.2" - tough-cookie "~2.4.3" - tunnel-agent "^0.6.0" - uuid "^3.3.2" - -safe-buffer@^5.0.1, safe-buffer@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity 
sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: - version "2.1.2" - resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== - -sshpk@^1.7.0: - version "1.16.1" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" - integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg== - dependencies: - asn1 "~0.2.3" - assert-plus "^1.0.0" - bcrypt-pbkdf "^1.0.0" - dashdash "^1.12.0" - ecc-jsbn "~0.1.1" - getpass "^0.1.1" - jsbn "~0.1.0" - safer-buffer "^2.0.2" - tweetnacl "~0.14.0" - -tough-cookie@~2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" - integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ== - dependencies: - psl "^1.1.24" - punycode "^1.4.1" - -tunnel-agent@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= - dependencies: - safe-buffer "^5.0.1" - -tweetnacl@^0.14.3, tweetnacl@~0.14.0: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= - -uri-js@^4.2.2: - version "4.4.1" - resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" - integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== - dependencies: - punycode "^2.1.0" - -uuid@^3.3.2: - version "3.3.2" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" - integrity sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA== - -verror@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= - dependencies: - assert-plus "^1.0.0" - core-util-is "1.0.2" - extsprintf "^1.2.0" - -vscode-nls@^4.0.0: - version "4.1.1" - resolved "https://registry.yarnpkg.com/vscode-nls/-/vscode-nls-4.1.1.tgz#f9916b64e4947b20322defb1e676a495861f133c" - integrity sha512-4R+2UoUUU/LdnMnFjePxfLqNhBS8lrAFyX7pjb2ud/lqDkrUavFUTcG7wR0HBZFakae0Q6KLBFjMS6W93F403A== diff --git a/extensions/integration-tests/setEnvironmentVariables.js b/extensions/integration-tests/setEnvironmentVariables.js index 2613749910..ca7a4acf5c 100644 --- a/extensions/integration-tests/setEnvironmentVariables.js +++ b/extensions/integration-tests/setEnvironmentVariables.js @@ -90,9 +90,6 @@ const AKV_URL = 'https://sqltoolssecretstore.vault.azure.net/'; const SECRET_AZURE_SERVER = 'ads-integration-test-azure-server'; const SECRET_AZURE_SERVER_USERNAME = 'ads-integration-test-azure-server-username'; const SECRET_AZURE_SERVER_PASSWORD = 'ads-integration-test-azure-server-password'; -const SECRET_BDC_SERVER = 'ads-integration-test-bdc-server'; -const SECRET_BDC_SERVER_USERNAME = 'ads-integration-test-bdc-server-username'; -const SECRET_BDC_SERVER_PASSWORD = 'ads-integration-test-bdc-server-password'; 
const SECRET_STANDALONE_SERVER = 'ads-integration-test-standalone-server'; const SECRET_STANDALONE_SERVER_USERNAME = 'ads-integration-test-standalone-server-username'; const SECRET_STANDALONE_SERVER_PASSWORD = 'ads-integration-test-standalone-server-password'; @@ -104,9 +101,6 @@ const SECRET_STANDALONE_SERVER_PASSWORD_2019 = 'ads-integration-test-standalone- const ENVAR_AZURE_SERVER = 'AZURE_SQL'; const ENVAR_AZURE_SERVER_USERNAME = 'AZURE_SQL_USERNAME'; const ENVAR_AZURE_SERVER_PASSWORD = 'AZURE_SQL_PWD'; -const ENVAR_BDC_SERVER = 'BDC_BACKEND_HOSTNAME'; -const ENVAR_BDC_SERVER_USERNAME = 'BDC_BACKEND_USERNAME'; -const ENVAR_BDC_SERVER_PASSWORD = 'BDC_BACKEND_PWD'; const ENVAR_STANDALONE_SERVER = 'STANDALONE_SQL'; const ENVAR_STANDALONE_SERVER_USERNAME = 'STANDALONE_SQL_USERNAME'; const ENVAR_STANDALONE_SERVER_PASSWORD = 'STANDALONE_SQL_PWD'; @@ -115,16 +109,12 @@ const ENVAR_STANDALONE_SERVER_USERNAME_2019 = 'STANDALONE_SQL_USERNAME_2019'; const ENVAR_STANDALONE_SERVER_PASSWORD_2019 = 'STANDALONE_SQL_PWD_2019'; const ENVAR_PYTHON_INSTALL_PATH = 'PYTHON_TEST_PATH'; const ENVAR_RUN_PYTHON3_TEST = 'RUN_PYTHON3_TEST'; -const ENVAR_RUN_PYSPARK_TEST = 'RUN_PYSPARK_TEST'; // Mapping between AKV secret and the environment variable names const SecretEnVarMapping = []; SecretEnVarMapping.push([SECRET_AZURE_SERVER, ENVAR_AZURE_SERVER]); SecretEnVarMapping.push([SECRET_AZURE_SERVER_PASSWORD, ENVAR_AZURE_SERVER_PASSWORD]); SecretEnVarMapping.push([SECRET_AZURE_SERVER_USERNAME, ENVAR_AZURE_SERVER_USERNAME]); -SecretEnVarMapping.push([SECRET_BDC_SERVER, ENVAR_BDC_SERVER]); -SecretEnVarMapping.push([SECRET_BDC_SERVER_PASSWORD, ENVAR_BDC_SERVER_PASSWORD]); -SecretEnVarMapping.push([SECRET_BDC_SERVER_USERNAME, ENVAR_BDC_SERVER_USERNAME]); SecretEnVarMapping.push([SECRET_STANDALONE_SERVER, ENVAR_STANDALONE_SERVER]); SecretEnVarMapping.push([SECRET_STANDALONE_SERVER_PASSWORD, ENVAR_STANDALONE_SERVER_PASSWORD]); SecretEnVarMapping.push([SECRET_STANDALONE_SERVER_USERNAME, ENVAR_STANDALONE_SERVER_USERNAME]); @@ -135,7 +125,6 @@ SecretEnVarMapping.push([SECRET_STANDALONE_SERVER_PASSWORD_2019, ENVAR_STANDALON // Set the values that are not stored in AKV here process.env[ENVAR_PYTHON_INSTALL_PATH] = NOTEBOOK_PYTHON_INSTALL_PATH; process.env[ENVAR_RUN_PYTHON3_TEST] = '1'; -process.env[ENVAR_RUN_PYSPARK_TEST] = '0'; const credential = new DefaultAzureCredential(); const client = new SecretClient(AKV_URL, credential); diff --git a/extensions/integration-tests/src/test/notebook.test.ts b/extensions/integration-tests/src/test/notebook.test.ts index 933b46f442..4058a6f03f 100644 --- a/extensions/integration-tests/src/test/notebook.test.ts +++ b/extensions/integration-tests/src/test/notebook.test.ts @@ -8,7 +8,7 @@ import * as assert from 'assert'; import * as azdata from 'azdata'; import * as vscode from 'vscode'; import * as path from 'path'; -import { sqlNotebookContent, writeNotebookToFile, sqlKernelMetadata, getTempFilePath, pySparkNotebookContent, pySparkKernelMetadata, pythonKernelMetadata, sqlNotebookMultipleCellsContent, notebookContentForCellLanguageTest, sqlKernelSpec, pythonKernelSpec, pySparkKernelSpec, CellTypes } from './notebook.util'; +import { sqlNotebookContent, writeNotebookToFile, sqlKernelMetadata, getTempFilePath, pythonKernelMetadata, sqlNotebookMultipleCellsContent, notebookContentForCellLanguageTest, sqlKernelSpec, pythonKernelSpec, CellTypes, pythonNotebookContent, powershellKernelSpec } from './notebook.util'; import { getConfigValue, EnvironmentVariable_PYTHON_PATH, TestServerProfile, 
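The surviving half of setEnvironmentVariables.js keeps the same shape: each remaining [secretName, envVarName] pair in SecretEnVarMapping is fetched from the SqlToolsSecretStore vault with DefaultAzureCredential and exported before the tests run. A minimal TypeScript sketch of that loop, assuming the same @azure/identity and @azure/keyvault-secrets packages the script uses (populateEnv is an illustrative name, not part of the script):

```typescript
import { DefaultAzureCredential } from '@azure/identity';
import { SecretClient } from '@azure/keyvault-secrets';

// Resolve each Key Vault secret and export it under its mapped environment
// variable name, mirroring how the setup script consumes SecretEnVarMapping.
async function populateEnv(vaultUrl: string, mapping: [string, string][]): Promise<void> {
	const client = new SecretClient(vaultUrl, new DefaultAzureCredential());
	for (const [secretName, envVarName] of mapping) {
		const secret = await client.getSecret(secretName);
		process.env[envVarName] = secret.value ?? '';
	}
}
```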
getStandaloneServer } from './testConfig'; import { connectToServer, sleep, testServerProfileToIConnectionProfile } from './utils'; import * as fs from 'fs'; @@ -149,7 +149,7 @@ suite('Notebook integration test suite', function () { if (process.env['RUN_PYTHON3_TEST'] === '1') { test('Python3 notebook test', async function () { - let notebook = await openNotebook(pySparkNotebookContent, pythonKernelMetadata, this.test.title); + let notebook = await openNotebook(pythonNotebookContent, pythonKernelMetadata, this.test.title); await runCell(notebook); let cellOutputs = notebook.document.cells[0].contents.outputs; console.log('Got cell outputs ---'); @@ -161,7 +161,7 @@ suite('Notebook integration test suite', function () { }); test('Clear all outputs - Python3 notebook ', async function () { - let notebook = await openNotebook(pySparkNotebookContent, pythonKernelMetadata, this.test.title); + let notebook = await openNotebook(pythonNotebookContent, pythonKernelMetadata, this.test.title); await runCell(notebook); await verifyClearAllOutputs(notebook); }); @@ -197,7 +197,7 @@ suite('Notebook integration test suite', function () { }); test('Change kernel different provider Python to SQL to Python', async function () { - let notebook = await openNotebook(pySparkNotebookContent, pythonKernelMetadata, this.test.title); + let notebook = await openNotebook(pythonNotebookContent, pythonKernelMetadata, this.test.title); await runCell(notebook); assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); assert(notebook.document.kernelSpec.name === 'python3', `Expected first kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); @@ -211,48 +211,23 @@ suite('Notebook integration test suite', function () { assert(kernelChanged && notebook.document.kernelSpec.name === 'python3', `Expected third kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); }); - test('Change kernel same provider Python to PySpark to Python', async function () { - let notebook = await openNotebook(pySparkNotebookContent, pythonKernelMetadata, this.test.title); - await runCell(notebook); - assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); - assert(notebook.document.kernelSpec.name === 'python3', `Expected first kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); - - let kernelChanged = await notebook.changeKernel(pySparkKernelSpec); - assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); - assert(kernelChanged && notebook.document.kernelSpec.name === 'pysparkkernel', `Expected second kernel name: pysparkkernel, Actual: ${notebook.document.kernelSpec.name}`); - - kernelChanged = await notebook.changeKernel(pythonKernelSpec); - assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); - assert(kernelChanged && notebook.document.kernelSpec.name === 'python3', `Expected third kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); - }); - } - - if (process.env['RUN_PYSPARK_TEST'] === '1') { - test('PySpark notebook test', async function () { - let notebook = await openNotebook(pySparkNotebookContent, pySparkKernelMetadata, this.test.title); + test('Change kernel same provider Python to Powershell to Python', async function () { + let notebook = await openNotebook(pythonNotebookContent, 
pythonKernelMetadata, this.test.title); await runCell(notebook); - let cellOutputs = notebook.document.cells[0].contents.outputs; - let sparkResult = (cellOutputs[3]).text; - assert(sparkResult === '2', `Expected spark result: 2, Actual: ${sparkResult}`); + assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); + assert(notebook.document.kernelSpec.name === 'python3', `Expected first kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); + + let kernelChanged = await notebook.changeKernel(powershellKernelSpec); + assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); + assert(kernelChanged && notebook.document.kernelSpec.name === 'powershell', `Expected second kernel name: powershell, Actual: ${notebook.document.kernelSpec.name}`); + + kernelChanged = await notebook.changeKernel(pythonKernelSpec); + assert(notebook.document.providerId === 'jupyter', `Expected providerId to be jupyter, Actual: ${notebook.document.providerId}`); + assert(kernelChanged && notebook.document.kernelSpec.name === 'python3', `Expected third kernel name: python3, Actual: ${notebook.document.kernelSpec.name}`); }); } /* After https://github.com/microsoft/azuredatastudio/issues/5598 is fixed, enable these tests. - test('scala language test', async function () { - let language = 'scala'; - await cellLanguageTest(notebookContentForCellLanguageTest, this.test.title, language, { - 'kernelspec': { - 'name': '', - 'display_name': '' - }, - 'language_info': { - name: language, - version: '', - mimetype: '' - } - }); - }); - test('empty language test', async function () { let language = ''; await cellLanguageTest(notebookContentForCellLanguageTest, this.test.title, language, { diff --git a/extensions/integration-tests/src/test/notebook.util.ts b/extensions/integration-tests/src/test/notebook.util.ts index 59e3b194c6..cc783081c1 100644 --- a/extensions/integration-tests/src/test/notebook.util.ts +++ b/extensions/integration-tests/src/test/notebook.util.ts @@ -19,7 +19,7 @@ export class CellTypes { public static readonly Raw = 'raw'; } -export const pySparkNotebookContent: azdata.nb.INotebookContents = { +export const pythonNotebookContent: azdata.nb.INotebookContents = { cells: [{ cell_type: CellTypes.Code, source: '1+1', @@ -28,8 +28,8 @@ export const pySparkNotebookContent: azdata.nb.INotebookContents = { }], metadata: { kernelspec: { - name: 'pysparkkernel', - display_name: 'PySpark' + name: 'python3', + display_name: 'Python 3' } }, nbformat: NBFORMAT, @@ -129,18 +129,6 @@ export const sqlNotebookMultipleCellsContent: azdata.nb.INotebookContents = { nbformat_minor: NBFORMAT_MINOR }; -export const pySparkKernelMetadata = { - kernelspec: { - name: 'pysparkkernel', - display_name: 'PySpark' - } -}; - -export const pySparkKernelSpec = { - name: 'pyspark', - display_name: 'PySpark' -}; - export const sqlKernelMetadata = { kernelspec: { name: 'SQL', @@ -165,6 +153,11 @@ export const pythonKernelSpec: azdata.nb.IKernelSpec = { display_name: 'Python 3' }; +export const powershellKernelSpec: azdata.nb.IKernelSpec = { + name: 'powershell', + display_name: 'PowerShell' +}; + export function writeNotebookToFile(pythonNotebook: azdata.nb.INotebookContents, relativeFilePath: string): vscode.Uri { let fileName = getTempFilePath(relativeFilePath); let notebookContentString = JSON.stringify(pythonNotebook); diff --git 
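The rewritten kernel test above keeps the round-trip shape of the PySpark version it replaces, with PowerShell as the intermediate kernel; powershellKernelSpec is the spec added to notebook.util.ts in the hunk above. A condensed sketch of that assertion pattern (assertKernelRoundTrip is an illustrative name):

```typescript
import * as assert from 'assert';
import * as azdata from 'azdata';

const pythonKernelSpec: azdata.nb.IKernelSpec = { name: 'python3', display_name: 'Python 3' };
const powershellKernelSpec: azdata.nb.IKernelSpec = { name: 'powershell', display_name: 'PowerShell' };

// Switch away from Python, confirm the provider stayed 'jupyter', switch back.
async function assertKernelRoundTrip(notebook: azdata.nb.NotebookEditor): Promise<void> {
	let changed = await notebook.changeKernel(powershellKernelSpec);
	assert(changed && notebook.document.kernelSpec.name === 'powershell', `Unexpected kernel: ${notebook.document.kernelSpec.name}`);
	assert(notebook.document.providerId === 'jupyter', `Unexpected provider: ${notebook.document.providerId}`);
	changed = await notebook.changeKernel(pythonKernelSpec);
	assert(changed && notebook.document.kernelSpec.name === 'python3', `Unexpected kernel: ${notebook.document.kernelSpec.name}`);
}
```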
a/extensions/integration-tests/src/test/objectExplorer.test.ts b/extensions/integration-tests/src/test/objectExplorer.test.ts index f2d4513ac6..ec993823e7 100644 --- a/extensions/integration-tests/src/test/objectExplorer.test.ts +++ b/extensions/integration-tests/src/test/objectExplorer.test.ts @@ -5,16 +5,11 @@ import 'mocha'; import * as azdata from 'azdata'; -import { getBdcServer, TestServerProfile, getAzureServer, getStandaloneServer } from './testConfig'; +import { TestServerProfile, getAzureServer, getStandaloneServer } from './testConfig'; import { connectToServer, createDB, DefaultConnectTimeoutInMs, asyncTimeout, tryDeleteDB } from './utils'; import * as assert from 'assert'; suite('Object Explorer integration suite', () => { - test.skip('BDC instance node label test', async function () { - const expectedNodeLabel = ['Databases', 'Security', 'Server Objects']; - const server = await getBdcServer(); - await verifyOeNode(server, DefaultConnectTimeoutInMs, expectedNodeLabel); - }); test('Standalone instance node label test', async function () { if (process.platform === 'win32') { const expectedNodeLabel = ['Databases', 'Security', 'Server Objects']; @@ -27,18 +22,6 @@ suite('Object Explorer integration suite', () => { const server = await getAzureServer(); await verifyOeNode(server, DefaultConnectTimeoutInMs, expectedNodeLabel); }); - test.skip('BDC instance context menu test', async function () { - const server = await getBdcServer(); - let expectedActions: string[]; - // Properties comes from the admin-tool-ext-win extension which is for Windows only, so the item won't show up on non-Win32 platforms - if (process.platform === 'win32') { - expectedActions = ['Manage', 'New Query', 'New Notebook', 'Disconnect', 'Delete Connection', 'Refresh', 'Data-tier Application wizard', 'Launch Profiler', 'Properties']; - } - else { - expectedActions = ['Manage', 'New Query', 'New Notebook', 'Disconnect', 'Delete Connection', 'Refresh', 'Data-tier Application wizard', 'Launch Profiler']; - } - return await verifyContextMenu(server, expectedActions); - }); test('Azure SQL DB context menu test @UNSTABLE@', async function () { const server = await getAzureServer(); const expectedActions = ['Manage', 'New Query', 'New Notebook', 'Disconnect', 'Delete Connection', 'Refresh', 'Data-tier Application wizard', 'Launch Profiler']; diff --git a/extensions/integration-tests/src/test/testConfig.ts b/extensions/integration-tests/src/test/testConfig.ts index f8e021dcc6..c564e3dd66 100644 --- a/extensions/integration-tests/src/test/testConfig.ts +++ b/extensions/integration-tests/src/test/testConfig.ts @@ -40,7 +40,6 @@ export enum ConnectionProvider { export enum EngineType { Standalone, Azure, - BigDataCluster } let connectionProviderMapping: { [key: string]: { name: string; displayName: string } } = {}; @@ -55,9 +54,6 @@ export function getConfigValue(name: string): string { return configValue ? 
configValue.toString() : ''; } -export const EnvironmentVariable_BDC_SERVER: string = 'BDC_BACKEND_HOSTNAME'; -export const EnvironmentVariable_BDC_USERNAME: string = 'BDC_BACKEND_USERNAME'; -export const EnvironmentVariable_BDC_PASSWORD: string = 'BDC_BACKEND_PWD'; export const EnvironmentVariable_STANDALONE_SERVER: string = 'STANDALONE_SQL'; export const EnvironmentVariable_STANDALONE_USERNAME: string = 'STANDALONE_SQL_USERNAME'; export const EnvironmentVariable_STANDALONE_PASSWORD: string = 'STANDALONE_SQL_PWD'; @@ -116,17 +112,6 @@ let TestingServers: TestServerProfile[] = [ version: '2012', engineType: EngineType.Azure }), - new TestServerProfile( - { - serverName: getConfigValue(EnvironmentVariable_BDC_SERVER), - userName: getConfigValue(EnvironmentVariable_BDC_USERNAME), - password: getConfigValue(EnvironmentVariable_BDC_PASSWORD), - authenticationType: AuthenticationType.SqlLogin, - database: 'master', - provider: ConnectionProvider.SQLServer, - version: '2019', - engineType: EngineType.BigDataCluster - }), new TestServerProfile( { serverName: getConfigValue(EnvironmentVariable_STANDALONE_SERVER_2019), @@ -159,11 +144,6 @@ export async function getStandaloneServer(version: '2017' | '2019' = '2017'): Pr return servers.filter(s => s.version === version && s.engineType === EngineType.Standalone)[0]; } -export async function getBdcServer(): Promise { - let servers = await getTestingServers(); - return servers.filter(s => s.version === '2019' && s.engineType === EngineType.BigDataCluster)[0]; -} - export async function getTestingServers(): Promise { let promise = new Promise(resolve => { resolve(TestingServers); diff --git a/extensions/kusto/src/constants.ts b/extensions/kusto/src/constants.ts index 1f1421b733..8f68807341 100644 --- a/extensions/kusto/src/constants.ts +++ b/extensions/kusto/src/constants.ts @@ -21,9 +21,6 @@ export const kustoProviderName = 'KUSTO'; export const UNTITLED_SCHEMA = 'untitled'; -export const clusterEndpointsProperty = 'clusterEndpoints'; -export const hdfsRootPath = '/'; - // SERVICE NAMES ////////////////////////////////////////////////////////// export const ObjectExplorerService = 'objectexplorer'; export const objectExplorerPrefix: string = 'objectexplorer://'; diff --git a/extensions/mssql/notebooks/TSG/cluster-status.ipynb b/extensions/mssql/notebooks/TSG/cluster-status.ipynb deleted file mode 100644 index e6298aa51f..0000000000 --- a/extensions/mssql/notebooks/TSG/cluster-status.ipynb +++ /dev/null @@ -1,389 +0,0 @@ -{ - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python", - "version": "3.6.6", - "mimetype": "text/x-python", - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "pygments_lexer": "ipython3", - "nbconvert_exporter": "python", - "file_extension": ".py" - } - }, - "nbformat_minor": 2, - "nbformat": 4, - "cells": [ - { - "cell_type": "markdown", - "source": [ - "![11811317_10153406249401648_2787740058697948111_n](https://raw.githubusercontent.com/Microsoft/sqlworkshops/master/graphics/solutions-microsoft-logo-small.png)\n", - "\n", - "# View the status of your SQL Server Big Data Cluster\n", - "This notebook allows you to see the status of the controller, master instance, and pools in your SQL Server Big Data Cluster.\n", - "\n", - "> ## **Important Instructions**\n", - "> ### **Before you begin, you will need:**\n", - ">* Big Data Cluster name\n", - ">* Controller username\n", - ">* Controller password\n", - ">* Controller endpoint \n", - "\n", - "You 
can find the controller endpoint from the SQL Big Data Cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\n", - "\n", - "If you do not know the credentials, ask the admin who deployed your cluster.\n", - "\n", - "### **Prerequisites**\n", - "Ensure the following tools are installed and added to PATH before proceeding.\n", - "\n", - "|Tools|Description|Installation|\n", - "|---|---|---|\n", - "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n", - "|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |\n", - "|Pandas Package | Python package for data manipulation | Will be installed by the notebook if not present |\n", - "\n", - "\n", - "### **Instructions**\n", - "* For the best experience, click **Run Cells** on the toolbar above. This will automatically execute all code cells below and show the cluster status in each table.\n", - "* When you click **Run Cells** for this Notebook, you will be prompted at the *Log in to your Big Data Cluster* code cell to provide your login credentials. Follow the prompts and press enter to proceed.\n", - "* **You won't need to modify any of the code cell contents** in this Notebook. If you accidentally made a change, you can reopen this Notebook from the cluster dashboard.\n", - "\n", - "\n", - "" - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": "### **Check azdata version**", - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "import sys, os\r\n", - "cmd = f'azdata --version'\r\n", - "cmdOutput = !{cmd}\r\n", - "azdataStr = '\\'azdata\\''\r\n", - "if len(cmdOutput) > 0 and ('command not found' in cmdOutput[1] or f'{azdataStr} is not recognized as an internal or external command' in cmdOutput[0]):\r\n", - " raise SystemExit('azdata not found! Please make sure azdata is installed and added to path' + '.\\n')\r\n", - "if '15.0' in cmdOutput[0]:\r\n", - " print('azdata version: ' + cmdOutput[0])\r\n", - "" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": "### **Install latest version of pandas**", - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "#install pandas\r\n", - "import pandas\r\n", - "pandas_version = pandas.__version__.split('.')\r\n", - "pandas_major = int(pandas_version[0])\r\n", - "pandas_minor = int(pandas_version[1])\r\n", - "pandas_patch = int(pandas_version[2])\r\n", - "if not (pandas_major > 0 or (pandas_major == 0 and pandas_minor > 24) or (pandas_major == 0 and pandas_minor == 24 and pandas_patch >= 2)):\r\n", - " pandasVersion = 'pandas==0.24.2'\r\n", - " cmd = f'{sys.executable} -m pip install {pandasVersion}'\r\n", - " cmdOutput = !{cmd}\r\n", - " print(f'\\nSuccess: Upgraded pandas to 0.24.2.')\r\n", - "else:\r\n", - " print('Pandas required version is already installed!') " - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "## **Log in to your Big Data Cluster**\r\n", - "To view cluster status, you will need to connect to your Big Data Cluster through azdata. 
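The deleted notebook's version-check cell and the login cell described here first gate on azdata being installed, then log in non-interactively by passing the controller credentials through environment variables rather than prompting on stdin. Outside the notebook, the same flow could be scripted directly; an illustrative TypeScript sketch using Node's child_process (azdataLogin is a hypothetical name; the flags and variable names are the ones the cells use):

```typescript
import { execSync } from 'child_process';

// Fail fast if azdata is missing, then log in to the cluster controller using
// the CONTROLLER_USERNAME / CONTROLLER_PASSWORD environment variables.
function azdataLogin(clusterName: string, username: string, password: string): string {
	execSync('azdata --version', { stdio: 'pipe' }); // throws if azdata is not on PATH
	return execSync(`azdata login --namespace ${clusterName} -u ${username} -a yes`, {
		env: { ...process.env, CONTROLLER_USERNAME: username, CONTROLLER_PASSWORD: password, ACCEPT_EULA: 'yes' },
		encoding: 'utf8'
	});
}
```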
\r\n", - "\r\n", - "When you run this code cell, you will be prompted for:\r\n", - "- Cluster name\r\n", - "- Controller username\r\n", - "- Controller password\r\n", - "\r\n", - "To proceed:\r\n", - "- **Click** on the input box\r\n", - "- **Type** the login info\r\n", - "- **Press** enter.\r\n", - "\r\n", - "If your cluster is missing a configuration file, you will be asked to provide your controller endpoint. (Format: **https://00.00.00.000:00000**) You can find the controller endpoint from the Big Data Cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\r\n", - "" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "import os, getpass, json\n", - "import pandas as pd\n", - "import numpy as np\n", - "from IPython.display import *\n", - "\n", - "def PromptForInfo(promptMsg, isPassword, errorMsg):\n", - " if isPassword:\n", - " promptResponse = getpass.getpass(prompt=promptMsg)\n", - " else:\n", - " promptResponse = input(promptMsg)\n", - " if promptResponse == \"\":\n", - " raise SystemExit(errorMsg + '\\n')\n", - " return promptResponse\n", - "\n", - "# Prompt user inputs:\n", - "cluster_name = PromptForInfo('Please provide your Cluster Name: ', False, 'Cluster Name is required!')\n", - "\n", - "controller_username = PromptForInfo('Please provide your Controller Username for login: ', False, 'Controller Username is required!')\n", - "\n", - "controller_password = PromptForInfo('Controller Password: ', True, 'Password is required!')\n", - "print('***********')\n", - "\n", - "!azdata logout\n", - "# Login in to your Big Data Cluster \n", - "cmd = f'azdata login --namespace {cluster_name} -u {controller_username} -a yes'\n", - "print(\"Start \" + cmd)\n", - "os.environ['CONTROLLER_USERNAME'] = controller_username\n", - "os.environ['CONTROLLER_PASSWORD'] = controller_password\n", - "os.environ['ACCEPT_EULA'] = 'yes'\n", - "\n", - "loginResult = !{cmd}\n", - "if 'ERROR: Please check your kube config or specify the correct controller endpoint with: --controller-endpoint https://:.' in loginResult[0] or 'ERROR' in loginResult[0]:\n", - " controller_ip = input('Please provide your Controller endpoint: ')\n", - " if controller_ip == \"\":\n", - " raise SystemExit(f'Controller IP is required!' + '\\n')\n", - " else:\n", - " cmd = f'azdata login --namespace {cluster_name} -e {controller_ip} -u {controller_username} -a yes'\n", - " loginResult = !{cmd}\n", - "print(loginResult)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "## **Status of Big Data Cluster**\r\n", - "After you successfully login to your bdc, you can view the overall status of each container before drilling down into each component." 
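Every status cell in the deleted notebook shells out to azdata with -o json and unwraps a top-level 'result' envelope before rendering, which is what the show_results helper in the next cell parses. A minimal sketch of that contract (BdcStatus and showBdcStatus are illustrative names; the notebook itself does this in Python with pandas):

```typescript
import { execSync } from 'child_process';

// The azdata status commands emit JSON with the payload under 'result'.
interface BdcStatus { result: unknown; }

function showBdcStatus(): void {
	const raw = execSync('azdata bdc status show -o json', { encoding: 'utf8' });
	const status: BdcStatus = JSON.parse(raw);
	console.log(JSON.stringify(status.result, null, 2));
}
```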
- ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Helper methods for formatting\n", - "def formatColumnNames(column):\n", - " return ' '.join(word[0].upper() + word[1:] for word in column.split())\n", - "\n", - "pd.set_option('display.max_colwidth', -1)\n", - "def show_results(results):\n", - " strResult = ''.join(results)\n", - " jsonResults = json.loads(strResult)\n", - " results = jsonResults['result']\n", - " if isinstance(results, list):\n", - " for result in results:\n", - " if isinstance(result, list):\n", - " show_formattedArray(result)\n", - " else:\n", - " show_keys(result)\n", - " else:\n", - " show_keys(results)\n", - "\n", - "def show_keys(results):\n", - " listKeys = []\n", - " if isinstance(results, dict):\n", - " for key in results.keys():\n", - " if results[key] and not isinstance(results[key], list):\n", - " print('\\033[1m' + formatColumnNames(key) + ': \\033[0m' + results[key])\n", - " if results[key] and isinstance(results[key], list):\n", - " listKeys.append(key)\n", - " for key in listKeys:\n", - " show_formattedArray(results[key])\n", - " if isinstance(results, str):\n", - " print('\\033[1m' + results + ': \\033[0m')\n", - "\n", - "def show_formattedArray(results):\n", - " fomattedRow = []\n", - " if not isinstance(results, list):\n", - " show_formattedResults(results)\n", - " else:\n", - " for row in results:\n", - " if isinstance(row, str):\n", - " show_keys(row)\n", - " else:\n", - " fomattedRow.append({ k : v for k,v in row.items() if isinstance(v, str) or v is None})\n", - " df = pd.DataFrame(fomattedRow)\n", - " df.columns = [formatColumnNames(n) for n in fomattedRow[0].keys()]\n", - " mydata = HTML(df.to_html(render_links=True))\n", - " display(mydata)\n", - " nameKeys = [k for k in fomattedRow[0].keys() if 'Name' in k]\n", - " for key in results[0].keys():\n", - " if key not in fomattedRow[0].keys():\n", - " for result in results:\n", - " print('\\033[1m' + formatColumnNames(nameKeys[0]) + ': \\033[0m' + result[nameKeys[0]])\n", - " show_formattedArray(result[key])\n", - "\n", - "def show_formattedResults(input):\n", - " df = pd.DataFrame([input])\n", - " df.columns = [formatColumnNames(n) for n in [input][0].keys()]\n", - " mydata = HTML(df.to_html(render_links=True))\n", - " display(mydata)\n", - " \n", - "# Display status of Big Data Cluster\n", - "results = !azdata bdc status show -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "## **Cluster Status**\r\n", - "For each cluster component below, running each code cell will generate a table. This table will include:\r\n", - "\r\n", - "|Column Name|Description|\r\n", - "|---|---|\r\n", - "|**Kind** | Identifies if component is a pod or a set. |\r\n", - "|**LogsURL** | Link to [Kibana](https://www.elastic.co/guide/en/kibana/current/introduction.html) logs which is used for troubleshooting. |\r\n", - "|**Name** | Provides the specific name of the pod or set. |\r\n", - "|**NodeMetricsURL** | Link to [Grafana](https://grafana.com/docs/guides/basic_concepts/) dashboard to view key metrics of the node. |\r\n", - "|**SQLMetricsURL** | Link to [Grafana](https://grafana.com/docs/guides/basic_concepts/) dashboard to view key metrics of the SQL instance. |\r\n", - "|**State** | Indicates state of the pod or set. 
|\r\n", - "\r\n", - "----------------------------------------------------------------" - ], - "metadata": {} - }, - { - "cell_type": "markdown", - "source": [ - "### **Controller status**\n", - "To learn more about the controller, [read here.](https://docs.microsoft.com/sql/big-data-cluster/concept-controller?view=sql-server-ver15)" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Display status of controller\n", - "results = !azdata bdc control status show --all -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "### **Master Instance status**\n", - "To learn more about the master instance, [read here.](https://docs.microsoft.com/sql/big-data-cluster/concept-master-instance?view=sqlallproducts-allversions)" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "results = !azdata bdc sql status show --resource master --all -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "### **Compute Pool status**\n", - "To learn more about compute pool, [read here.](https://docs.microsoft.com/sql/big-data-cluster/concept-compute-pool?view=sqlallproducts-allversions)" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Display status of compute pool\n", - "results = !azdata bdc sql status show --resource compute-0 --all -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "### **Storage Pool status**\n", - "To learn more about storage pool, [read here.](https://docs.microsoft.com/sql/big-data-cluster/concept-storage-pool?view=sqlallproducts-allversions)" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Display status of storage pools\n", - "results = !azdata bdc sql status show --resource storage-0 --all -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "### **Data Pool status**\n", - "To learn more about data pool, [read here.](https://docs.microsoft.com/sql/big-data-cluster/concept-data-pool?view=sqlallproducts-allversions)" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Display status of data pools\n", - "results = !azdata bdc sql status show --resource data-0 --all -o json\n", - "show_results(results)" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", - "source": [ - "### **Spark Pool status**\n", - "Displays status of spark pool if it exists. 
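The component cells that follow vary only the --resource argument of one command shape; the controller and Spark pool use their own subcommands (azdata bdc control status show and azdata bdc spark status show). An illustrative consolidation of the SQL-side resources the deleted cells query one by one:

```typescript
import { execSync } from 'child_process';

// Query master instance, compute, storage, and data pool status in one loop.
const resources = ['master', 'compute-0', 'storage-0', 'data-0'];
for (const resource of resources) {
	const raw = execSync(`azdata bdc sql status show --resource ${resource} --all -o json`, { encoding: 'utf8' });
	console.log(resource, JSON.parse(raw).result);
}
```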
Otherwise, will show as \"No spark pool.\"" - ], - "metadata": {} - }, - { - "cell_type": "code", - "source": [ - "# Display status of spark pool\n", - "results = !azdata bdc spark status show --all -o json\n", - "show_results(results)\n", - "" - ], - "metadata": {}, - "outputs": [], - "execution_count": 0 - } - ] -} diff --git a/extensions/mssql/package.json b/extensions/mssql/package.json index 3ed29dca6f..811f9e75f9 100644 --- a/extensions/mssql/package.json +++ b/extensions/mssql/package.json @@ -44,82 +44,6 @@ "light": "resources/light/export_blue_light.svg" } }, - { - "command": "mssqlCluster.uploadFiles", - "title": "%mssqlCluster.uploadFiles%" - }, - { - "command": "mssqlCluster.mkdir", - "title": "%mssqlCluster.mkdir%" - }, - { - "command": "mssqlCluster.deleteFiles", - "title": "%mssqlCluster.deleteFiles%" - }, - { - "command": "mssqlCluster.previewFile", - "title": "%mssqlCluster.previewFile%" - }, - { - "command": "mssqlCluster.saveFile", - "title": "%mssqlCluster.saveFile%" - }, - { - "command": "mssqlCluster.copyPath", - "title": "%mssqlCluster.copyPath%" - }, - { - "command": "mssqlCluster.manageAccess", - "title": "%mssqlCluster.manageAccess%" - }, - { - "command": "mssqlCluster.task.newNotebook", - "title": "%notebook.command.new%", - "icon": { - "dark": "resources/dark/new_notebook.svg", - "light": "resources/light/new_notebook.svg" - } - }, - { - "command": "mssqlCluster.task.openNotebook", - "title": "%notebook.command.open%", - "icon": { - "dark": "resources/dark/open_notebook_inverse.svg", - "light": "resources/light/open_notebook.svg" - } - }, - { - "command": "mssqlCluster.livy.cmd.submitSparkJob", - "title": "%title.submitSparkJob%" - }, - { - "command": "mssqlCluster.livy.task.submitSparkJob", - "title": "%title.newSparkJob%", - "icon": { - "dark": "resources/dark/new_spark_job_inverse.svg", - "light": "resources/light/new_spark_job.svg" - } - }, - { - "command": "mssqlCluster.task.openClusterDashboard", - "title": "%title.openClusterDashboard%", - "icon": { - "dark": "resources/dark/cluster_status_inverse.svg", - "light": "resources/light/cluster_status.svg" - } - }, - { - "command": "mssqlCluster.livy.task.openYarnHistory", - "title": "%title.openYarnHistory%", - "icon": { - "dark": "resources/light/hadoop.svg", - "light": "resources/light/hadoop.svg" - } - }, - { - "command": "mssqlCluster.livy.cmd.submitFileToSparkJob", - "title": "%title.submitSparkJob%" - }, { "command": "mssql.searchServers", "title": "%title.searchServers%" @@ -434,54 +358,6 @@ "command": "mssql.exportNotebookToSql", "when": "false" }, - { - "command": "mssqlCluster.uploadFiles", - "when": "false" - }, - { - "command": "mssqlCluster.mkdir", - "when": "false" - }, - { - "command": "mssqlCluster.deleteFiles", - "when": "false" - }, - { - "command": "mssqlCluster.previewFile", - "when": "false" - }, - { - "command": "mssqlCluster.saveFile", - "when": "false" - }, - { - "command": "mssqlCluster.copyPath", - "when": "false" - }, - { - "command": "mssqlCluster.manageAccess", - "when": "false" - }, - { - "command": "mssqlCluster.task.newNotebook", - "when": "false" - }, - { - "command": "mssqlCluster.task.openNotebook", - "when": "false" - }, - { - "command": "mssqlCluster.livy.cmd.submitFileToSparkJob", - "when": "false" - }, - { - "command": "mssqlCluster.livy.task.submitSparkJob", - "when": "false" - }, - { - "command": "mssqlCluster.task.openClusterDashboard", - "when": "false" - }, { "command": "mssql.newTable", "when": "false" @@ -492,51 +368,6 @@ } ], "objectExplorer/item/context": [ - { - 
"command": "mssqlCluster.uploadFiles", - "when": "nodeType=~/^mssqlCluster/ && nodeType != mssqlCluster:message && nodeType != mssqlCluster:file && nodeSubType=~/^(?!:mount).*$/", - "group": "1mssqlCluster@1" - }, - { - "command": "mssqlCluster.mkdir", - "when": "nodeType=~/^mssqlCluster/ && nodeType != mssqlCluster:message && nodeType != mssqlCluster:file && nodeSubType=~/^(?!:mount).*$/", - "group": "1mssqlCluster@1" - }, - { - "command": "mssqlCluster.saveFile", - "when": "nodeType == mssqlCluster:file", - "group": "1mssqlCluster@1" - }, - { - "command": "mssqlCluster.previewFile", - "when": "nodeType == mssqlCluster:file", - "group": "1mssqlCluster@2" - }, - { - "command": "mssqlCluster.copyPath", - "when": "nodeType=~/^mssqlCluster/ && nodeType != mssqlCluster:connection && nodeType != mssqlCluster:message && nodeType != mssqlCluster:hdfs", - "group": "1mssqlCluster@3" - }, - { - "command": "mssqlCluster.manageAccess", - "when": "nodeType=~/^mssqlCluster/ && nodeType != mssqlCluster:connection && nodeType != mssqlCluster:message", - "group": "1mssqlCluster@3" - }, - { - "command": "mssqlCluster.deleteFiles", - "when": "nodeType=~/^mssqlCluster/ && nodeType != mssqlCluster:hdfs && nodeType != mssqlCluster:connection && viewItem != mssqlCluster:connection && nodeType != mssqlCluster:message && nodeSubType=~/^(?!:mount).*$/", - "group": "1mssqlCluster@4" - }, - { - "command": "mssqlCluster.livy.cmd.submitSparkJob", - "when": "nodeType == mssqlCluster:hdfs", - "group": "1mssqlCluster@7" - }, - { - "command": "mssqlCluster.livy.cmd.submitFileToSparkJob", - "when": "nodeType == mssqlCluster:file && nodeSubType =~/:spark:/", - "group": "1mssqlCluster@6" - }, { "command": "mssql.designTable", "when": "connectionProvider == MSSQL && nodeType == Table && nodeSubType != LedgerDropped", @@ -743,57 +574,6 @@ } ] }, - "dashboard.tabs": [ - { - "id": "mssql-big-data-cluster", - "description": "%tab.bigDataClusterDescription%", - "provider": "MSSQL", - "title": "%title.bigDataCluster%", - "group": "home", - "when": "connectionProvider == 'MSSQL' && mssql:iscluster && dashboardContext == 'server'", - "container": { - "grid-container": [ - { - "name": "%title.tasks%", - "row": 0, - "col": 0, - "colspan": 1, - "widget": { - "tasks-widget": [ - "mssqlCluster.task.newNotebook", - "mssqlCluster.task.openNotebook", - "mssqlCluster.livy.task.submitSparkJob", - "mssqlCluster.task.openClusterDashboard" - ] - } - }, - { - "name": "%title.endpoints%", - "row": 1, - "col": 0, - "rowspan": 2.5, - "colspan": 2, - "widget": { - "modelview": { - "id": "bdc-endpoints" - } - } - }, - { - "name": "%title.books%", - "row": 0, - "col": 2, - "colspan": 1, - "widget": { - "modelview": { - "id": "books-widget" - } - } - } - ] - } - } - ], "connectionProvider": { "providerId": "MSSQL", "displayName": "%mssql.provider.displayName%", @@ -810,13 +590,6 @@ "light": "resources/light/azureDB.svg", "dark": "resources/dark/azureDB_inverse.svg" } - }, - { - "id": "mssql:cluster", - "path": { - "light": "resources/light/sql_bigdata_cluster.svg", - "dark": "resources/dark/sql_bigdata_cluster_inverse.svg" - } } ], "connectionOptions": [ diff --git a/extensions/mssql/package.nls.json b/extensions/mssql/package.nls.json index 016378595a..08094e2a2e 100644 --- a/extensions/mssql/package.nls.json +++ b/extensions/mssql/package.nls.json @@ -6,34 +6,9 @@ "json.schemas.schema.desc": "The schema definition for the given URL. 
The schema only needs to be provided to avoid accesses to the schema URL.", "json.format.enable.desc": "Enable/disable default JSON formatter (requires restart)", - "mssqlCluster.uploadFiles": "Upload files", - "mssqlCluster.mkdir": "New directory", - "mssqlCluster.deleteFiles": "Delete", - "mssqlCluster.previewFile": "Preview", - "mssqlCluster.saveFile": "Save", - "mssqlCluster.copyPath": "Copy Path", - "mssqlCluster.manageAccess": "Manage Access", - - "notebook.command.new": "New Notebook", - "notebook.command.open": "Open Notebook", - - "tab.bigDataClusterDescription": "Tasks and information about your SQL Server Big Data Cluster", - "title.bigDataCluster": "SQL Server Big Data Cluster", - "title.submitSparkJob": "Submit Spark Job", - "title.newSparkJob": "New Spark Job", - "title.openSparkHistory": "View Spark History", - "title.openYarnHistory": "View Yarn History", - "title.tasks": "Tasks", - "title.installPackages": "Install Packages", - "title.configurePython": "Configure Python for Notebooks", - "title.openClusterDashboard": "Cluster\nDashboard", - "title.searchServers": "Search: Servers", "title.clearSearchServerResult": "Search: Clear Search Server Results", - "title.endpoints": "Service Endpoints", - "title.books": "Notebooks", - "title.showLogFile": "Show Log File", "mssql.disabled": "Disabled", diff --git a/extensions/mssql/resources/dark/cluster_inverse.svg b/extensions/mssql/resources/dark/cluster_inverse.svg deleted file mode 100644 index b35c0c5d36..0000000000 --- a/extensions/mssql/resources/dark/cluster_inverse.svg +++ /dev/null @@ -1 +0,0 @@ -cluster_inverse \ No newline at end of file diff --git a/extensions/mssql/resources/dark/cluster_status_inverse.svg b/extensions/mssql/resources/dark/cluster_status_inverse.svg deleted file mode 100644 index 971d362195..0000000000 --- a/extensions/mssql/resources/dark/cluster_status_inverse.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - -new_notebook_inverse - - - - diff --git a/extensions/mssql/resources/dark/new_notebook.svg b/extensions/mssql/resources/dark/new_notebook.svg deleted file mode 100644 index 6557616999..0000000000 --- a/extensions/mssql/resources/dark/new_notebook.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/extensions/mssql/resources/dark/new_spark_job_inverse.svg b/extensions/mssql/resources/dark/new_spark_job_inverse.svg deleted file mode 100644 index e5ed4b3190..0000000000 --- a/extensions/mssql/resources/dark/new_spark_job_inverse.svg +++ /dev/null @@ -1 +0,0 @@ -new_spark_job_inverse \ No newline at end of file diff --git a/extensions/mssql/resources/dark/open_notebook_inverse.svg b/extensions/mssql/resources/dark/open_notebook_inverse.svg deleted file mode 100644 index a95750c49f..0000000000 --- a/extensions/mssql/resources/dark/open_notebook_inverse.svg +++ /dev/null @@ -1 +0,0 @@ -open_notebook_inverse \ No newline at end of file diff --git a/extensions/mssql/resources/dark/sql_bigdata_cluster_inverse.svg b/extensions/mssql/resources/dark/sql_bigdata_cluster_inverse.svg deleted file mode 100644 index dbc823e9f0..0000000000 --- a/extensions/mssql/resources/dark/sql_bigdata_cluster_inverse.svg +++ /dev/null @@ -1,46 +0,0 @@ - - - - - - image/svg+xml - - sql_bigdata_cluster - - - - - - - sql_bigdata_cluster - - - - diff --git a/extensions/mssql/resources/light/cluster.svg b/extensions/mssql/resources/light/cluster.svg deleted file mode 100644 index e0e8e68f41..0000000000 --- a/extensions/mssql/resources/light/cluster.svg +++ /dev/null @@ -1 +0,0 @@ -cluster \ No newline at end of file diff --git 
a/extensions/mssql/resources/light/cluster_status.svg b/extensions/mssql/resources/light/cluster_status.svg deleted file mode 100644 index b2d4d4bc65..0000000000 --- a/extensions/mssql/resources/light/cluster_status.svg +++ /dev/null @@ -1,9 +0,0 @@ - - - -new_notebook - - - - diff --git a/extensions/mssql/resources/light/hadoop.svg b/extensions/mssql/resources/light/hadoop.svg deleted file mode 100644 index 0757489a17..0000000000 --- a/extensions/mssql/resources/light/hadoop.svg +++ /dev/null @@ -1 +0,0 @@ -hadoop \ No newline at end of file diff --git a/extensions/mssql/resources/light/new_notebook.svg b/extensions/mssql/resources/light/new_notebook.svg deleted file mode 100644 index 6557616999..0000000000 --- a/extensions/mssql/resources/light/new_notebook.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/extensions/mssql/resources/light/new_spark_job.svg b/extensions/mssql/resources/light/new_spark_job.svg deleted file mode 100644 index 3775bf4da3..0000000000 --- a/extensions/mssql/resources/light/new_spark_job.svg +++ /dev/null @@ -1 +0,0 @@ -new_spark_job \ No newline at end of file diff --git a/extensions/mssql/resources/light/open_notebook.svg b/extensions/mssql/resources/light/open_notebook.svg deleted file mode 100644 index 0041ae9b21..0000000000 --- a/extensions/mssql/resources/light/open_notebook.svg +++ /dev/null @@ -1 +0,0 @@ -open_notebook \ No newline at end of file diff --git a/extensions/mssql/resources/light/sql_bigdata_cluster.svg b/extensions/mssql/resources/light/sql_bigdata_cluster.svg deleted file mode 100644 index fcf1133186..0000000000 --- a/extensions/mssql/resources/light/sql_bigdata_cluster.svg +++ /dev/null @@ -1,45 +0,0 @@ - - - - - - image/svg+xml - - sql_bigdata_cluster - - - - - - - sql_bigdata_cluster - - - - diff --git a/extensions/mssql/src/constants.ts b/extensions/mssql/src/constants.ts index e36110dab3..3e56ae9b28 100644 --- a/extensions/mssql/src/constants.ts +++ b/extensions/mssql/src/constants.ts @@ -9,30 +9,8 @@ export const serviceCrashLink = 'https://github.com/Microsoft/vscode-mssql/wiki/ export const extensionConfigSectionName = 'mssql'; // DATA PROTOCOL VALUES /////////////////////////////////////////////////////////// -export const mssqlClusterProviderName = 'mssqlCluster'; -export const hadoopEndpointNameGateway = 'gateway'; -export const protocolVersion = '1.0'; -export const authenticationTypePropName = 'authenticationType'; -export const integratedAuth = 'integrated'; -export const hostPropName = 'host'; -export const userPropName = 'user'; -export const knoxPortPropName = 'knoxport'; -export const passwordPropName = 'password'; -export const groupIdPropName = 'groupId'; -export const defaultKnoxPort = 30443; -export const groupIdName = 'groupId'; export const sqlProviderName = 'MSSQL'; -export const UNTITLED_SCHEMA = 'untitled'; - -export const hadoopConnectionTimeoutSeconds = 15; -export const hdfsRootPath = '/'; - -export const clusterEndpointsProperty = 'clusterEndpoints'; -export const isBigDataClusterProperty = 'isBigDataCluster'; - -export const ViewType = 'view'; - // SERVICE NAMES ////////////////////////////////////////////////////////// export const ObjectExplorerService = 'objectexplorer'; export const CmsService = 'cmsService'; @@ -44,39 +22,3 @@ export const SqlAssessmentService = 'sqlAssessmentService'; export const SqlMigrationService = 'sqlMigrationService'; export const NotebookConvertService = 'notebookConvertService'; export const AzureBlobService = 'azureBlobService'; - -export enum BuiltInCommands { - SetContext 
= 'setContext' -} - -export enum CommandContext { - WizardServiceEnabled = 'wizardservice:enabled' -} - -export enum MssqlClusterItems { - Connection = 'mssqlCluster:connection', - Folder = 'mssqlCluster:folder', - File = 'mssqlCluster:file', - Error = 'mssqlCluster:error' -} - -export enum MssqlClusterItemsSubType { - Mount = ':mount:', - MountChild = ':mountChild:', - Spark = ':spark:' -} - -// SPARK JOB SUBMISSION ////////////////////////////////////////////////////////// -export const mssqlClusterNewNotebookTask = 'mssqlCluster.task.newNotebook'; -export const mssqlClusterOpenNotebookTask = 'mssqlCluster.task.openNotebook'; -export const mssqlOpenClusterDashboard = 'mssqlCluster.task.openClusterDashboard'; -export const mssqlClusterLivySubmitSparkJobCommand = 'mssqlCluster.livy.cmd.submitSparkJob'; -export const mssqlClusterLivySubmitSparkJobFromFileCommand = 'mssqlCluster.livy.cmd.submitFileToSparkJob'; -export const mssqlClusterLivySubmitSparkJobTask = 'mssqlCluster.livy.task.submitSparkJob'; -export const mssqlClusterLivyOpenSparkHistory = 'mssqlCluster.livy.task.openSparkHistory'; -export const mssqlClusterLivyOpenYarnHistory = 'mssqlCluster.livy.task.openYarnHistory'; -export const mssqlClusterLivySubmitPath = '/gateway/default/livy/v1/batches'; -export const mssqlClusterLivyTimeInMSForCheckYarnApp = 1000; -export const mssqlClusterLivyRetryTimesForCheckYarnApp = 20; -export const mssqlClusterSparkJobFileSelectorButtonWidth = '30px'; -export const mssqlClusterSparkJobFileSelectorButtonHeight = '30px'; diff --git a/extensions/mssql/src/contextProvider.ts b/extensions/mssql/src/contextProvider.ts index e9d9cb1612..3d4a99143c 100644 --- a/extensions/mssql/src/contextProvider.ts +++ b/extensions/mssql/src/contextProvider.ts @@ -7,7 +7,6 @@ import * as vscode from 'vscode'; import * as azdata from 'azdata'; import * as types from './types'; -import * as Constants from './constants'; enum BuiltInCommands { SetContext = 'setContext', @@ -16,7 +15,6 @@ enum BuiltInCommands { enum ContextKeys { ISCLOUD = 'mssql:iscloud', EDITIONID = 'mssql:engineedition', - ISCLUSTER = 'mssql:iscluster', SERVERMAJORVERSION = 'mssql:servermajorversion' } @@ -41,7 +39,6 @@ export default class ContextProvider { public onDashboardOpen(e: azdata.DashboardDocument): void { let iscloud: boolean; let edition: number; - let isCluster: boolean = false; let serverMajorVersion: number; if (e.profile.providerName.toLowerCase() === 'mssql' && !types.isUndefinedOrNull(e.serverInfo) && !types.isUndefinedOrNull(e.serverInfo.engineEditionId)) { if (isCloudEditions.some(i => i === e.serverInfo.engineEditionId)) { @@ -51,13 +48,6 @@ export default class ContextProvider { } edition = e.serverInfo.engineEditionId; - - if (!types.isUndefinedOrNull(e.serverInfo.options)) { - let isBigDataCluster = e.serverInfo.options[Constants.isBigDataClusterProperty]; - if (isBigDataCluster) { - isCluster = isBigDataCluster; - } - } serverMajorVersion = e.serverInfo.serverMajorVersion; } @@ -69,10 +59,6 @@ export default class ContextProvider { void setCommandContext(ContextKeys.EDITIONID, edition); } - if (!types.isUndefinedOrNull(isCluster)) { - void setCommandContext(ContextKeys.ISCLUSTER, isCluster); - } - if (!types.isUndefinedOrNull(serverMajorVersion)) { void setCommandContext(ContextKeys.SERVERMAJORVERSION, serverMajorVersion); } diff --git a/extensions/mssql/src/dashboard/serviceEndpoints.ts b/extensions/mssql/src/dashboard/serviceEndpoints.ts deleted file mode 100644 index 9ec1c0d388..0000000000 --- 
a/extensions/mssql/src/dashboard/serviceEndpoints.ts +++ /dev/null @@ -1,180 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import * as bdc from 'bdc'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import * as utils from '../utils'; - -const mgmtProxyName = 'mgmtproxy'; -const grafanaEndpointName = 'metricsui'; -const grafanaDescription = localize('grafana', "Metrics Dashboard"); -const logsuiEndpointName = 'logsui'; -const logsuiDescription = localize('kibana', "Log Search Dashboard"); -const sparkHistoryEndpointName = 'spark-history'; -const sparkHistoryDescription = localize('sparkHistory', "Spark Jobs Management and Monitoring Dashboard"); -const yarnUiEndpointName = 'yarn-ui'; -const yarnHistoryDescription = localize('yarnHistory', "Spark Diagnostics and Monitoring Dashboard"); -const hyperlinkedEndpoints = [grafanaEndpointName, logsuiEndpointName, sparkHistoryEndpointName, yarnUiEndpointName]; - -export function registerServiceEndpoints(context: vscode.ExtensionContext): void { - azdata.ui.registerModelViewProvider('bdc-endpoints', async (view) => { - let endpointsArray: Array<bdc.IEndpointModel> = Object.assign([], utils.getClusterEndpoints(view.serverInfo)); - - if (endpointsArray.length > 0) { - const grafanaEp = endpointsArray.find(e => e.name === grafanaEndpointName); - if (grafanaEp && grafanaEp.endpoint && grafanaEp.endpoint.indexOf('/d/wZx3OUdmz') === -1) { - // Update to have correct URL - grafanaEp.endpoint += '/d/wZx3OUdmz'; - } - const kibanaEp = endpointsArray.find(e => e.name === logsuiEndpointName); - if (kibanaEp && kibanaEp.endpoint && kibanaEp.endpoint.indexOf('/app/kibana#/discover') === -1) { - // Update to have correct URL - kibanaEp.endpoint += '/app/kibana#/discover'; - } - - if (!grafanaEp) { - // We are on older CTP, need to manually add some endpoints. - // TODO remove once CTP support goes away - const managementProxyEp = endpointsArray.find(e => e.name === mgmtProxyName); - if (managementProxyEp) { - endpointsArray.push(getCustomEndpoint(managementProxyEp, grafanaEndpointName, grafanaDescription, '/grafana/d/wZx3OUdmz')); - endpointsArray.push(getCustomEndpoint(managementProxyEp, logsuiEndpointName, logsuiDescription, '/kibana/app/kibana#/discover')); - } - - const gatewayEp = endpointsArray.find(e => e.name === 'gateway'); - if (gatewayEp) { - endpointsArray.push(getCustomEndpoint(gatewayEp, sparkHistoryEndpointName, sparkHistoryDescription, '/gateway/default/sparkhistory')); - endpointsArray.push(getCustomEndpoint(gatewayEp, yarnUiEndpointName, yarnHistoryDescription, '/gateway/default/yarn')); - } - } - - endpointsArray = endpointsArray.map(e => { - e.description = getEndpointDisplayText(e.name, e.description); - return e; - }); - - // Sort the endpoints.
The sort method is that SQL Server Master is first - followed by all - // others in alphabetical order by endpoint - const sqlServerMasterEndpoints = endpointsArray.filter(e => e.name === Endpoint.sqlServerMaster); - endpointsArray = endpointsArray.filter(e => e.name !== Endpoint.sqlServerMaster) - .sort((e1, e2) => e1.endpoint.localeCompare(e2.endpoint)); - endpointsArray.unshift(...sqlServerMasterEndpoints); - - const container = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'column', width: '100%', height: '100%' }).component(); - endpointsArray.forEach(endpointInfo => { - const endPointRow = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'row' }).component(); - const nameCell = view.modelBuilder.text().withProps({ value: endpointInfo.description }).component(); - endPointRow.addItem(nameCell, { CSSStyles: { 'width': '35%', 'font-weight': '600', 'user-select': 'text' } }); - if (hyperlinkedEndpoints.findIndex(e => e === endpointInfo.name) >= 0) { - const linkCell = view.modelBuilder.hyperlink() - .withProps({ - label: endpointInfo.endpoint, - title: endpointInfo.endpoint, - url: endpointInfo.endpoint - }).component(); - endPointRow.addItem(linkCell, { CSSStyles: { 'width': '62%', 'color': '#0078d4', 'text-decoration': 'underline', 'padding-top': '10px', 'overflow': 'hidden', 'text-overflow': 'ellipsis' } }); - } - else { - const endpointCell = - view.modelBuilder.text() - .withProps( - { - value: endpointInfo.endpoint, - title: endpointInfo.endpoint, - CSSStyles: { 'overflow': 'hidden', 'text-overflow': 'ellipsis' } - }) - .component(); - endPointRow.addItem(endpointCell, { CSSStyles: { 'width': '62%', 'user-select': 'text' } }); - } - const copyValueCell = view.modelBuilder.button().component(); - copyValueCell.iconPath = { light: context.asAbsolutePath('resources/light/copy.png'), dark: context.asAbsolutePath('resources/dark/copy_inverse.png') }; - copyValueCell.onDidClick(() => { - void vscode.env.clipboard.writeText(endpointInfo.endpoint); - }); - copyValueCell.title = localize("copyText", "Copy"); - copyValueCell.iconHeight = '14px'; - copyValueCell.iconWidth = '14px'; - endPointRow.addItem(copyValueCell, { CSSStyles: { 'width': '3%', 'padding-top': '10px' } }); - - container.addItem(endPointRow, { CSSStyles: { 'padding-left': '10px', 'border-top': 'solid 1px #ccc', 'box-sizing': 'border-box', 'user-select': 'text' } }); - }); - const endpointsContainer = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'column', width: '540px', height: '100%', position: 'absolute' }).component(); - endpointsContainer.addItem(container, { CSSStyles: { 'padding-top': '25px', 'padding-left': '5px' } }); - - await view.initializeModel(endpointsContainer); - } - }); -} - -function getCustomEndpoint(parentEndpoint: bdc.IEndpointModel, serviceName: string, description: string, serviceUrl?: string): bdc.IEndpointModel { - if (parentEndpoint) { - let endpoint: bdc.IEndpointModel = { - name: serviceName, - description: description, - endpoint: parentEndpoint.endpoint + serviceUrl, - protocol: 'https' - }; - return endpoint; - } - return null; -} - -export enum Endpoint { - gateway = 'gateway', - sparkHistory = 'spark-history', - yarnUi = 'yarn-ui', - appProxy = 'app-proxy', - mgmtproxy = 'mgmtproxy', - managementProxy = 'management-proxy', - logsui = 'logsui', - metricsui = 'metricsui', - controller = 'controller', - sqlServerMaster = 'sql-server-master', - webhdfs = 'webhdfs', - livy = 'livy' -} - -/** - * Gets the localized text to display for a corresponding endpoint - * 
@param endpointName The endpoint name to get the display text for - * @param description The backup description to use if we don't have our own - */ -function getEndpointDisplayText(endpointName?: string, description?: string): string { - endpointName = endpointName || ''; - switch (endpointName.toLowerCase()) { - case Endpoint.appProxy: - return localize('endpoint.appproxy', "Application Proxy"); - case Endpoint.controller: - return localize('endpoint.controller', "Cluster Management Service"); - case Endpoint.gateway: - return localize('endpoint.gateway', "Gateway to access HDFS files, Spark"); - case Endpoint.managementProxy: - return localize('endpoint.managementproxy', "Management Proxy"); - case Endpoint.mgmtproxy: - return localize('endpoint.mgmtproxy', "Management Proxy"); - case Endpoint.sqlServerMaster: - return localize('endpoint.sqlServerEndpoint', "SQL Server Master Instance Front-End"); - case Endpoint.metricsui: - return localize('endpoint.grafana', "Metrics Dashboard"); - case Endpoint.logsui: - return localize('endpoint.kibana', "Log Search Dashboard"); - case Endpoint.yarnUi: - return localize('endpoint.yarnHistory', "Spark Diagnostics and Monitoring Dashboard"); - case Endpoint.sparkHistory: - return localize('endpoint.sparkHistory', "Spark Jobs Management and Monitoring Dashboard"); - case Endpoint.webhdfs: - return localize('endpoint.webhdfs', "HDFS File System Proxy"); - case Endpoint.livy: - return localize('endpoint.livy', "Proxy for running Spark statements, jobs, applications"); - default: - // Default is to use the description if one was given, otherwise worst case just fall back to using the - // original endpoint name - return description && description.length > 0 ? description : endpointName; - } -} diff --git a/extensions/mssql/src/hdfs/aclEntry.ts b/extensions/mssql/src/hdfs/aclEntry.ts deleted file mode 100644 index f1da8fac97..0000000000 --- a/extensions/mssql/src/hdfs/aclEntry.ts +++ /dev/null @@ -1,384 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import { IconPathHelper, IconPath } from '../iconHelper'; -import { groupBy } from '../util/arrays'; -import * as loc from '../localizedConstants'; - -/** - * The permission status of an HDFS path - this consists of : - * - The sticky bit for that path - * - The permission bits for the owner, group and other - * - (Optional) Set of additional ACL entries on this path - */ -export class PermissionStatus { - /** - * - * @param owner The ACL entry object for the owner permissions - * @param group The ACL entry object for the group permissions - * @param other The ACL entry object for the other permissions - * @param stickyBit The sticky bit status for the object. 
If true the owner/root are - * the only ones who can delete the resource or its contents (if a folder) - * @param aclEntries The ACL entries defined for the object - */ - constructor(public owner: AclEntry, public group: AclEntry, public other: AclEntry, public stickyBit: boolean, public aclEntries: AclEntry[]) { } - - /** - * The permission octal for the path in the form [#]### with each # mapping to : - * 0 (optional) - The sticky bit (1 or 0) - * 1 - The owner permission digit - * 2 - The group permission digit - * 3 - The other permission digit - * @see AclEntryPermission for more information on the permission digits - */ - public get permissionOctal(): string { - // Always use the access scope for the permission octal - it doesn't have a concept of other scopes - return `${this.stickyBit ? '1' : ''}${this.owner.getPermissionDigit(AclEntryScope.access)}${this.group.getPermissionDigit(AclEntryScope.access)}${this.other.getPermissionDigit(AclEntryScope.access)}`; - } -} - -/** - * The type of an ACL entry. Corresponds to the first (or second if a scope is present) field of - * an ACL entry - e.g. user:bob:rwx (user) or default:group::r-- (group) - */ -export enum AclType { - /** - * An ACL entry applied to a specific user. - */ - user = 'user', - /** - * An ACL entry applied to a specific group. - */ - group = 'group', - /** - * An ACL mask entry. - */ - mask = 'mask', - /** - * An ACL entry that applies to all other users that were not covered by one of the more specific ACL entry types. - */ - other = 'other' -} - -/** - * The type of permission on a file - this corresponds to the field in the file status used in commands such as chmod. - * Typically this value is represented as a 3 digit octal - e.g. 740 - where the first digit is the owner, the second - * the group and the third other. @see parseAclPermissionFromOctal - */ -export enum PermissionType { - owner = 'owner', - group = 'group', - other = 'other' -} - -export enum AclEntryScope { - /** - * An ACL entry that is inspected during permission checks to enforce permissions. - */ - access = 'access', - /** - * An ACL entry to be applied to a directory's children that do not otherwise have their own ACL defined. - */ - default = 'default' -} - -/** - * The read, write and execute permissions for an ACL - */ -export class AclEntryPermission { - - constructor(public read: boolean, public write: boolean, public execute: boolean) { } - - /** - * Returns the string representation of the permissions in the form [r-][w-][x-]. - * e.g. - * rwx - * r-- - * --- - */ - public toString() { - return `${this.read ? 'r' : '-'}${this.write ? 'w' : '-'}${this.execute ? 'x' : '-'}`; - } - - /** - * Gets the digit for a permission octal for this permission. This digit is a value - * between 0 and 7 inclusive, which is a bitwise OR the permission flags (r/w/x). - */ - public get permissionDigit(): number { - return (this.read ? 4 : 0) + (this.write ? 2 : 0) + (this.execute ? 1 : 0); - } -} - -/** - * Parses a string representation of a permission into an AclPermission object. The string must consist - * of 3 characters for the read, write and execute permissions where each character is either a r/w/x or - * a -. - * e.g. 
The following are all valid strings - * rwx - * --- - * -w- - * @param permissionString The string representation of the permission - */ -function parseAclPermission(permissionString: string): AclEntryPermission { - permissionString = permissionString.toLowerCase(); - if (!/^[r\-][w\-][x\-]$/i.test(permissionString)) { - throw new Error(`Invalid permission string ${permissionString} - must match /^[r\-][w\-][x\-]$/i`); - } - return new AclEntryPermission(permissionString[0] === 'r', permissionString[1] === 'w', permissionString[2] === 'x'); -} - -/** - * A single ACL Permission entry - * scope - The scope of the entry @see AclEntryScope - * type - The type of the entry @see AclEntryType - * name - The name of the user/group used to set ACLs. Optional. - * displayName - The name to display in the UI - * permission - The permission set for this ACL. @see AclPermission - */ -export class AclEntry { - private readonly permissions = new Map<AclEntryScope, AclEntryPermission>(); - - constructor( - public readonly type: AclType | PermissionType, - public readonly name: string, - public readonly displayName: string, - ) { } - - /** - * Adds a new permission at the specified scope, overwriting the existing permission at that scope if it - * exists - * @param scope The scope to add the new permission at - * @param permission The permission to set - */ - public addPermission(scope: AclEntryScope, permission: AclEntryPermission): void { - this.permissions.set(scope, permission); - } - - /** - * Deletes the permission at the specified scope. - * @param scope The scope to delete the permission for - * @returns True if the entry was successfully deleted, false if not (it didn't exist) - */ - public removePermission(scope: AclEntryScope): boolean { - return this.permissions.delete(scope); - } - - /** - * Gets the permission at the specified scope if one exists - * @param scope The scope to retrieve the permission for - */ - public getPermission(scope: AclEntryScope): AclEntryPermission | undefined { - return this.permissions.get(scope); - } - - /** - * Gets the full list of permissions and their scopes for this entry - */ - public getAllPermissions(): { scope: AclEntryScope, permission: AclEntryPermission }[] { - return Array.from(this.permissions.entries()).map((entry: [AclEntryScope, AclEntryPermission]) => { - return { scope: entry[0], permission: entry[1] }; - }); - } - - /** - * Gets the octal number representing the permission for the specified scope of - * this entry. This will either be a number between 0 and 7 inclusive (which is - * a bitwise OR the permission flags rwx) or undefined if the scope doesn't exist - * for this entry. - */ - public getPermissionDigit(scope: AclEntryScope): number | undefined { - return this.permissions.has(scope) ? this.permissions.get(scope).permissionDigit : undefined; - } - - /** - * Returns the string representation of each ACL Entry in the form [SCOPE:]TYPE:NAME:PERMISSION. - * Note that SCOPE is only displayed if it's default - access is implied if there is no scope - * specified. - * The name is optional and so may be empty. - * Example strings : - * user:bob:rwx - * default:user:bob:rwx - * user::r-x - * default:group::r-- - */ - toAclStrings(includeDefaults: boolean = true): string[] { - return Array.from(this.permissions.entries()).filter((entry: [AclEntryScope, AclEntryPermission]) => includeDefaults || entry[0] !== AclEntryScope.default).map((entry: [AclEntryScope, AclEntryPermission]) => { - return `${entry[0] === AclEntryScope.default ?
'default:' : ''}${getAclEntryType(this.type)}:${this.name}:${entry[1].toString()}`; - }); - } - - /** - * Checks whether this and the specified AclEntry are equal. Two entries are considered equal - * if their scope, type and name are equal. - * @param other The other entry to compare against - */ - public isEqual(other: AclEntry): boolean { - if (!other) { - return false; - } - return AclEntry.compare(this, other) === 0; - } - - /** - * Compares two AclEntry objects for ordering - * @param a The first AclEntry to compare - * @param b The second AclEntry to compare - */ - static compare(a: AclEntry, b: AclEntry): number { - if (a.name === b.name) { - if (a.type === b.type) { - return 0; - } - return a.type.localeCompare(b.type); - } - return a.name.localeCompare(b.name); - } -} - -/** - * Maps the possible entry types into their corresponding values for use in an ACL string - * @param type The type to convert - */ -function getAclEntryType(type: AclType | PermissionType): AclType { - // We only need to map AclPermissionType - AclEntryType already has the - // correct values we're mapping to. - if (type in PermissionType) { - switch (type) { - case PermissionType.owner: - return AclType.user; - case PermissionType.group: - return AclType.group; - case PermissionType.other: - return AclType.other; - default: - throw new Error(`Unknown AclPermissionType : ${type}`); - } - } - return type; -} - -/** - * Parses a complete ACL string into separate AclEntry objects for each entry. A valid string consists of multiple entries - * separated by a comma. - * - * A valid entry must match (default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3}) - * e.g. the following are all valid entries - * user:bob:rwx - * user::rwx - * default:user:bob:rwx - * group::r-x - * default:other::r-- - * - * So a valid ACL string might look like this - * user:bob:rwx,user::rwx,default:user:bob:rwx,group::r-x,default:other::r-- - * @param aclString The string representation of the ACL - */ -export function parseAclList(aclString: string): AclEntry[] { - if (aclString === '') { - return []; - } - - if (!/^(default:)?(user|group|mask|other):([A-Za-z_][A-Za-z0-9._-]*)?:([rwx-]{3})?(,(default:)?(user|group|mask|other):([A-Za-z_][A-Za-z0-9._-]*)?:([rwx-]{3})?)*$/.test(aclString)) { - throw new Error(`Invalid ACL string ${aclString}. Expected to match ^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$`); - } - return mergeAclEntries(aclString.split(',').map(aclEntryString => parseAclEntry(aclEntryString))); -} - -/** - * Parses a given string representation of an ACL Entry into an AclEntry object. This method - * assumes the string has already been checked for validity. - * @param aclString The string representation of the ACL entry - */ -function parseAclEntry(aclString: string): AclEntry { - const parts: string[] = aclString.split(':'); - let i = 0; - const scope: AclEntryScope = parts.length === 4 && parts[i++] === 'default' ?
AclEntryScope.default : AclEntryScope.access; - let type: AclType; - switch (parts[i++]) { - case 'user': - type = AclType.user; - break; - case 'group': - type = AclType.group; - break; - case 'mask': - type = AclType.mask; - break; - case 'other': - type = AclType.other; - break; - default: - throw new Error(`Unknown ACL Entry type ${parts[i - 1]}`); - } - const name = parts[i++]; - const permission = parseAclPermission(parts[i++]); - const entry = new AclEntry(type, name, name); - entry.addPermission(scope, permission); - return entry; -} - -/** - * Parses an octal in the form [#]### into a combination of an optional sticky bit and a set - * of @see AclEntryPermission. Each digit in the octal corresponds to the sticky bit or a - * particular user type - owner, group and other respectively. - * If the sticky bit exists and its value is 1 then the sticky bit value is set to true. - * Each permission digit is then expected to be a value between 0 and 7 inclusive, which is a bitwise OR the permission flags - * for the file. - * 4 - Read - * 2 - Write - * 1 - Execute - * So an octal of 1730 would map to : - * - sticky === true - * - The owner with rwx permissions - * - The group with -wx permissions - * - All others with --- permissions - * @param octal The octal string to parse - */ -export function parseAclPermissionFromOctal(octal: string): { sticky: boolean, owner: AclEntryPermission, group: AclEntryPermission, other: AclEntryPermission } { - if (!octal || (octal.length !== 3 && octal.length !== 4)) { - throw new Error(`Invalid octal ${octal} - it must be a 3 or 4 digit string`); - } - - const sticky = octal.length === 4 ? octal[0] === '1' : false; - const ownerPermissionDigit = parseInt(octal[octal.length - 3]); - const groupPermissionDigit = parseInt(octal[octal.length - 2]); - const otherPermissionDigit = parseInt(octal[octal.length - 1]); - - return { - sticky: sticky, - owner: new AclEntryPermission((ownerPermissionDigit & 4) === 4, (ownerPermissionDigit & 2) === 2, (ownerPermissionDigit & 1) === 1), - group: new AclEntryPermission((groupPermissionDigit & 4) === 4, (groupPermissionDigit & 2) === 2, (groupPermissionDigit & 1) === 1), - other: new AclEntryPermission((otherPermissionDigit & 4) === 4, (otherPermissionDigit & 2) === 2, (otherPermissionDigit & 1) === 1) - }; -} - -export function getImageForType(type: AclType | PermissionType): { iconPath: IconPath, title: string } { - switch (type) { - case AclType.user: - case PermissionType.owner: - return { iconPath: IconPathHelper.user, title: loc.owner }; - case AclType.group: - case PermissionType.group: - case PermissionType.other: - return { iconPath: IconPathHelper.group, title: loc.group }; - } - return { iconPath: { dark: '', light: '' }, title: '' }; -} - -/** - * Merges a list of AclEntry objects such that the resulting list contains only a single entry for each name/type pair with - * a separate permission for each separate AclEntry - * @param entries The set of AclEntries to merge - */ -function mergeAclEntries(entries: AclEntry[]): AclEntry[] { - const groupedEntries = groupBy(entries, (a, b) => AclEntry.compare(a, b)); // First group the entries together - return groupedEntries.map(entryGroup => { // Now make a single AclEntry for each group and add all the permissions from each group - const entry = new AclEntry(entryGroup[0].type, entryGroup[0].name, entryGroup[0].displayName); - entryGroup.forEach(e => { - e.getAllPermissions().forEach(sp => entry.addPermission(sp.scope, sp.permission)); - }); - return entry; - }); -} 
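For reference, the ACL model that the deleted aclEntry.ts implemented is plain POSIX-over-HDFS: a [#]### permission octal (optional sticky digit, then owner/group/other digits) and entries of the form [SCOPE:]TYPE:NAME:PERMISSION. Below is a minimal standalone sketch of that parsing, based only on the conventions documented in the removed comments above; the function names are illustrative, not the extension's API.

```ts
// Each octal digit is a bitwise OR of read (4), write (2) and execute (1).
function digitToRwx(digit: number): string {
	return `${digit & 4 ? 'r' : '-'}${digit & 2 ? 'w' : '-'}${digit & 1 ? 'x' : '-'}`;
}

// Parse a [#]### octal: an optional leading sticky digit, then owner/group/other.
function describeOctal(octal: string): { sticky: boolean, owner: string, group: string, other: string } {
	if (!/^[0-7]{3,4}$/.test(octal)) {
		throw new Error(`Invalid octal ${octal} - it must be a 3 or 4 digit string`);
	}
	const sticky = octal.length === 4 && octal[0] === '1';
	const [owner, group, other] = octal.slice(-3).split('').map(c => digitToRwx(parseInt(c, 10)));
	return { sticky, owner, group, other };
}

// Split an ACL string into (scope, type, name, permission) tuples, mirroring the
// [SCOPE:]TYPE:NAME:PERMISSION form: scope defaults to 'access' when omitted.
function splitAclString(aclString: string): { scope: string, type: string, name: string, permission: string }[] {
	return aclString.split(',').map(entry => {
		const parts = entry.split(':');
		const scope = parts.length === 4 ? parts.shift()! : 'access';
		const [type, name, permission] = parts;
		return { scope, type, name, permission };
	});
}

// describeOctal('1750') -> { sticky: true, owner: 'rwx', group: 'r-x', other: '---' }
// splitAclString('user:bob:rwx,default:group::r-x')[1]
//   -> { scope: 'default', type: 'group', name: '', permission: 'r-x' }
```

This is only a sketch: the deleted module additionally validated names against the regex shown above and merged duplicate entries per (type, name) pair via mergeAclEntries.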
diff --git a/extensions/mssql/src/hdfs/fileStatus.ts b/extensions/mssql/src/hdfs/fileStatus.ts deleted file mode 100644 index 080d8ff98c..0000000000 --- a/extensions/mssql/src/hdfs/fileStatus.ts +++ /dev/null @@ -1,112 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import { FileType } from '../objectExplorerNodeProvider/fileSources'; - -export const enum HdfsFileType { - File = 'File', - Directory = 'Directory', - Symlink = 'Symlink' -} - -/** - * Maps a @see HdfsFileType to its corresponding @see FileType. Will return undefined if - * passed in type is undefined. - * @param hdfsFileType The HdfsFileType to map from - */ -export function hdfsFileTypeToFileType(hdfsFileType: HdfsFileType | undefined): FileType | undefined { - switch (hdfsFileType) { - case HdfsFileType.Directory: - return FileType.Directory; - case HdfsFileType.File: - return FileType.File; - case HdfsFileType.Symlink: - return FileType.Symlink; - case undefined: - return undefined; - default: - throw new Error(`Unexpected file type ${hdfsFileType}`); - } -} - -export class FileStatus { - /** - * - * @param accessTime - * @param blockSize - * @param group The group owner - * @param length - * @param modificationTime - * @param owner The user who is the owner - * @param pathSuffix - * @param permission - * @param replication - * @param snapshotEnabled - * @param type - */ - constructor( - /** - * Access time for the file - */ - public readonly accessTime: string, - /** - * The block size of a file. - */ - public readonly blockSize: string, - /** - * The group owner. - */ - public readonly group: string, - /** - * The number of bytes in a file. (0 for directories) - */ - public readonly length: string, - /** - * The modification time. - */ - public readonly modificationTime: string, - /** - * The user who is the owner. - */ - public readonly owner: string, - /** - * The path suffix. - */ - public readonly pathSuffix: string, - /** - * The permission represented as a octal string. - */ - public readonly permission: string, - /** - * The number of replication of a file. - */ - public readonly replication: string, - /** - * Whether a directory is snapshot enabled or not - */ - public readonly snapshotEnabled: string, - /** - * The type of the path object. - */ - public readonly type: HdfsFileType - ) { } -} - -/** - * Parses a fileType string into the corresponding @see HdfsFileType - * @param fileType The fileType string to parse - */ -export function parseHdfsFileType(fileType: string): HdfsFileType { - switch (fileType.toLowerCase()) { - case 'file': - return HdfsFileType.File; - case 'directory': - return HdfsFileType.Directory; - case 'symlink': - return HdfsFileType.Symlink; - default: - throw new Error(`Unknown HdfsFileType '${fileType}'`); - } -} diff --git a/extensions/mssql/src/hdfs/hdfsModel.ts b/extensions/mssql/src/hdfs/hdfsModel.ts deleted file mode 100644 index 6d749a37be..0000000000 --- a/extensions/mssql/src/hdfs/hdfsModel.ts +++ /dev/null @@ -1,143 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA.
See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { IFileSource, FileType } from '../objectExplorerNodeProvider/fileSources'; -import { PermissionStatus, AclEntry, AclEntryScope, AclType, AclEntryPermission } from './aclEntry'; -import { FileStatus, hdfsFileTypeToFileType } from './fileStatus'; -import * as nls from 'vscode-nls'; - -const localize = nls.loadMessageBundle(); - -/** - * Model for storing the state of a specified file/folder in HDFS - */ -export class HdfsModel { - - private readonly _onPermissionStatusUpdated = new vscode.EventEmitter<PermissionStatus>(); - /** - * Event that's fired anytime changes are made by the model to the @see PermissionStatus - */ - public onPermissionStatusUpdated = this._onPermissionStatusUpdated.event; - - /** - * The @see PermissionStatus of the file/folder - */ - public permissionStatus: PermissionStatus; - - /** - * The @see FileStatus of the file/folder - */ - public fileStatus: FileStatus; - - constructor(private readonly fileSource: IFileSource, private readonly path: string) { - this.refresh().catch(err => console.error('Error refreshing HDFS Model ', err)); - } - - /** - * Refresh the ACL status with the current values on HDFS - */ - public async refresh(): Promise<void> { - [this.permissionStatus, this.fileStatus] = await Promise.all([ - this.fileSource.getAclStatus(this.path), - this.fileSource.getFileStatus(this.path)]); - this._onPermissionStatusUpdated.fire(this.permissionStatus); - } - - /** - * Creates a new ACL Entry and adds it to the list of current entries. Will do nothing - * if a duplicate entry (@see AclEntry.isEqual) exists - * @param name The name of the ACL Entry - * @param type The type of ACL to create - */ - public createAndAddAclEntry(name: string, type: AclType): void { - if (!this.permissionStatus || !name || name.length < 1) { - return; - } - const newEntry = new AclEntry(type, name, name); - newEntry.addPermission(AclEntryScope.access, new AclEntryPermission(true, true, true)); - // Don't add duplicates. This also checks the owner, group and other items - if ([this.permissionStatus.owner, this.permissionStatus.group, this.permissionStatus.other].concat(this.permissionStatus.aclEntries).find(entry => entry.isEqual(newEntry))) { - return; - } - - this.permissionStatus.aclEntries.push(newEntry); - this._onPermissionStatusUpdated.fire(this.permissionStatus); - } - - /** - * Deletes the specified entry from the list of registered entries - * @param entryToDelete The entry to delete - */ - public deleteAclEntry(entryToDelete: AclEntry): void { - this.permissionStatus.aclEntries = this.permissionStatus.aclEntries.filter(entry => !entry.isEqual(entryToDelete)); - this._onPermissionStatusUpdated.fire(this.permissionStatus); - } - - - /** - * Applies the changes made to this model to HDFS. Note that this will overwrite ALL permissions so any - * permissions that shouldn't change need to still exist and have the same values. - * @param recursive Whether to apply the changes recursively (to all sub-folders and files) - */ - public async apply(recursive: boolean = false): Promise<void> { - await this.applyAclChanges(this.path, hdfsFileTypeToFileType(this.fileStatus ?
this.fileStatus.type : undefined)); - if (recursive) { - azdata.tasks.startBackgroundOperation( - { - connection: undefined, - displayName: localize('mssql.recursivePermissionOpStarted', "Applying permission changes recursively under '{0}'", this.path), - description: '', - isCancelable: false, - operation: async op => { - await this.applyToChildrenRecursive(op, this.path); - op.updateStatus(azdata.TaskStatus.Succeeded, localize('mssql.recursivePermissionOpSucceeded', "Permission changes applied successfully.")); - } - } - ); - } - } - - /** - * Recursive call to apply the current set of changes to all children of this path (if any) - * @param op Background operation used to track status of the task - * @param path The path - */ - private async applyToChildrenRecursive(op: azdata.BackgroundOperation, path: string): Promise<void> { - try { - op.updateStatus(azdata.TaskStatus.InProgress, localize('mssql.recursivePermissionOpProgress', "Applying permission changes to '{0}'.", path)); - const files = await this.fileSource.enumerateFiles(path, true); - // Apply changes to all children of this path and then recursively apply to children of any directories - await Promise.all( - files.map(file => this.applyAclChanges(file.path, file.fileType)).concat( - files.filter(f => f.fileType === FileType.Directory).map(d => this.applyToChildrenRecursive(op, d.path))) - ); - } catch (error) { - const errMsg = localize('mssql.recursivePermissionOpError', "Error applying permission changes: {0}", (error instanceof Error ? error.message : error)); - void vscode.window.showErrorMessage(errMsg); - op.updateStatus(azdata.TaskStatus.Failed, errMsg); - } - } - - /** - * Applies the current set of Permissions/ACLs to the specified path - * @param path The path to apply the changes to - */ - private async applyAclChanges(path: string, fileType: FileType | undefined): Promise<any> { - // HDFS won't remove existing default ACLs even if you call setAcl with no default ACLs specified. You - // need to call removeDefaultAcl specifically to remove them. - if (!this.permissionStatus.owner.getPermission(AclEntryScope.default) && - !this.permissionStatus.group.getPermission(AclEntryScope.default) && - !this.permissionStatus.other.getPermission(AclEntryScope.default)) { - await this.fileSource.removeDefaultAcl(path); - } - return Promise.all([ - this.fileSource.setAcl(path, fileType, this.permissionStatus), - this.fileSource.setPermission(path, this.permissionStatus)]); - } -} - - diff --git a/extensions/mssql/src/hdfs/mount.ts b/extensions/mssql/src/hdfs/mount.ts deleted file mode 100644 index a4c424ea69..0000000000 --- a/extensions/mssql/src/hdfs/mount.ts +++ /dev/null @@ -1,19 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information.
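The recursive apply in the removed hdfsModel.ts above fans out with Promise.all: it applies the ACL changes to every child at one level and, in parallel, recurses into each child directory. A minimal standalone sketch of that traversal pattern follows; FileEntry and FileSource here are illustrative stand-ins, not the extension's interfaces.

```ts
// Illustrative stand-ins for the extension's file source abstraction.
interface FileEntry { path: string; isDirectory: boolean; }
interface FileSource {
	enumerateFiles(path: string): Promise<FileEntry[]>;
	applyAclChanges(path: string): Promise<void>;
}

// Mirror of the fan-out used above: update every child at this level and, in
// parallel, recurse into each child directory until the whole tree is covered.
async function applyToChildrenRecursive(fs: FileSource, path: string): Promise<void> {
	const children = await fs.enumerateFiles(path);
	await Promise.all(
		children.map(c => fs.applyAclChanges(c.path)).concat(
			children.filter(c => c.isDirectory).map(d => applyToChildrenRecursive(fs, d.path)))
	);
}
```

As the original comment notes, setAcl alone does not clear existing default ACLs on HDFS, which is why the removed code called removeDefaultAcl explicitly whenever no default entries remained.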
- *--------------------------------------------------------------------------------------------*/ - -/** - * Information about a HDFS mount to a remote directory - */ -export interface Mount { - mountPath: string; - mountStatus: string; - remotePath: string; -} - -export enum MountStatus { - None = 0, - Mount = 1, - Mount_Child = 2 -} diff --git a/extensions/mssql/src/hdfs/ui/hdfsManageAccessDialog.ts b/extensions/mssql/src/hdfs/ui/hdfsManageAccessDialog.ts deleted file mode 100644 index 82e8ae075d..0000000000 --- a/extensions/mssql/src/hdfs/ui/hdfsManageAccessDialog.ts +++ /dev/null @@ -1,641 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { HdfsModel } from '../hdfsModel'; -import { IFileSource } from '../../objectExplorerNodeProvider/fileSources'; -import { PermissionStatus, AclEntry, AclType, getImageForType, AclEntryScope, AclEntryPermission, PermissionType } from '../../hdfs/aclEntry'; -import { cssStyles } from './uiConstants'; -import * as loc from '../../localizedConstants'; -import { HdfsError } from '../webhdfs'; -import { IconPathHelper } from '../../iconHelper'; -import { HdfsFileType } from '../fileStatus'; - -const permissionsTypeIconColumnWidth = 35; -const permissionsDeleteColumnWidth = 50; - -const permissionsCheckboxColumnWidth = 50; - -const permissionsRowHeight = 35; -const locationLabelHeight = 23; // Fits the text size without too much white space - -const checkboxSize = 20; - - -type PermissionCheckboxesMapping = { - model: AclEntry, - access: { read: azdata.CheckBoxComponent, write: azdata.CheckBoxComponent, execute: azdata.CheckBoxComponent }, - default: { read: azdata.CheckBoxComponent, write: azdata.CheckBoxComponent, execute: azdata.CheckBoxComponent } -}; - -export class ManageAccessDialog { - - private hdfsModel: HdfsModel; - private viewInitialized: boolean = false; - private modelInitialized: boolean = false; - private modelBuilder: azdata.ModelBuilder; - private rootContainer: azdata.FlexContainer; - private rootLoadingComponent: azdata.LoadingComponent; - private stickyCheckbox: azdata.CheckBoxComponent; - private inheritDefaultsCheckbox: azdata.CheckBoxComponent; - private posixPermissionsContainer: azdata.FlexContainer; - private namedUsersAndGroupsPermissionsContainer: azdata.FlexContainer; - private addUserOrGroupInput: azdata.InputBoxComponent; - private dialog: azdata.window.Dialog; - private applyRecursivelyButton: azdata.window.Button; - private posixPermissionCheckboxesMapping: PermissionCheckboxesMapping[] = []; - private namedSectionInheritCheckboxes: azdata.CheckBoxComponent[] = []; - private addUserOrGroupSelectedType: AclType; - private onViewInitializedEvent: vscode.EventEmitter<void> = new vscode.EventEmitter<void>(); - - constructor(private hdfsPath: string, private fileSource: IFileSource) { - this.hdfsModel = new HdfsModel(this.fileSource, this.hdfsPath); - this.hdfsModel.onPermissionStatusUpdated(permissionStatus => this.handlePermissionStatusUpdated(permissionStatus)); - } - - public openDialog(): void { - if (!this.dialog) { - this.dialog = azdata.window.createModelViewDialog(loc.manageAccessTitle, 'HdfsManageAccess', true); - this.dialog.okButton.label =
loc.applyText; - - this.applyRecursivelyButton = azdata.window.createButton(loc.applyRecursivelyText); - this.applyRecursivelyButton.onClick(async () => { - try { - azdata.window.closeDialog(this.dialog); - await this.hdfsModel.apply(true); - } catch (err) { - void vscode.window.showErrorMessage(loc.errorApplyingAclChanges(err instanceof HdfsError ? err.message : err)); - } - }); - this.dialog.customButtons = [this.applyRecursivelyButton]; - this.dialog.registerCloseValidator(async (): Promise<boolean> => { - try { - await this.hdfsModel.apply(); - return true; - } catch (err) { - void vscode.window.showErrorMessage(loc.errorApplyingAclChanges(err instanceof HdfsError ? err.message : err)); - } - return false; - }); - const tab = azdata.window.createTab(loc.manageAccessTitle); - tab.registerContent(async (modelView: azdata.ModelView) => { - this.modelBuilder = modelView.modelBuilder; - - this.rootContainer = modelView.modelBuilder.flexContainer() - .withLayout({ flexFlow: 'column', width: '100%', height: '100%' }) - .component(); - - this.rootLoadingComponent = modelView.modelBuilder.loadingComponent().withItem(this.rootContainer).component(); - - await modelView.initializeModel(this.rootLoadingComponent); - this.modelInitialized = true; - this.handlePermissionStatusUpdated(this.hdfsModel.permissionStatus); - }); - this.dialog.content = [tab]; - } - - this.applyRecursivelyButton.hidden = true; // Always hide the button until we get the status back saying whether this is a directory or not - azdata.window.openDialog(this.dialog); - } - - private initializeView(permissionStatus: PermissionStatus): void { - // We nest the content inside another container for the margins - getting them on the root container isn't supported - const contentContainer = this.modelBuilder.flexContainer() - .withLayout({ flexFlow: 'column', width: '100%', height: '100%' }) - .component(); - this.rootContainer.addItem(contentContainer, { CSSStyles: { 'margin-left': '20px', 'margin-right': '20px' } }); - - const locationContainer = this.modelBuilder.flexContainer().withLayout({ flexFlow: 'row', alignItems: 'center' }).component(); - - const locationLabel = this.modelBuilder.text() - .withProps({ - value: loc.locationTitle, - CSSStyles: { ...cssStyles.titleCss } - }).component(); - - const pathLabel = this.modelBuilder.text() - .withProps({ - value: this.hdfsPath, - title: this.hdfsPath, - height: locationLabelHeight, - CSSStyles: { 'user-select': 'text', 'overflow': 'hidden', 'text-overflow': 'ellipsis', ...cssStyles.titleCss } - }).component(); - - locationContainer.addItem(locationLabel, - { - flex: '0 0 auto', - CSSStyles: { 'margin-bottom': '5px' } - }); - locationContainer.addItem(pathLabel, - { - flex: '1 1 auto', - CSSStyles: { 'border': '1px solid #ccc', 'padding': '5px', 'margin-left': '10px', 'min-height': `${locationLabelHeight}px` } - }); - - contentContainer.addItem(locationContainer, { flex: '0 0 auto', CSSStyles: { 'margin-top': '20px' } }); - - // ===================== - // = Permissions Title = - // ===================== - const permissionsTitle = this.modelBuilder.text() - .withProps({ value: loc.permissionsHeader }) - .component(); - contentContainer.addItem(permissionsTitle, { CSSStyles: { 'margin-top': '15px', ...cssStyles.titleCss } }); - - // ==================== - // = Inherit Defaults = - // ==================== - - // Defaults are only settable for directories - if (this.hdfsModel.fileStatus.type === HdfsFileType.Directory) { - contentContainer.addItem(this.createInheritDefaultsCheckbox()); - } - - //
========== - // = Sticky = - // ========== - this.stickyCheckbox = this.modelBuilder.checkBox() - .withProps({ - width: checkboxSize, - height: checkboxSize, - checked: permissionStatus.stickyBit, - label: loc.stickyLabel - }).component(); - this.stickyCheckbox.onChanged(() => { - this.hdfsModel.permissionStatus.stickyBit = this.stickyCheckbox.checked; - }); - contentContainer.addItem(this.stickyCheckbox); - - // ============================= - // = POSIX permissions section = - // ============================= - - const posixPermissionsSectionHeaderRow = this.createPermissionsSectionHeaderRow(0, 0); - contentContainer.addItem(posixPermissionsSectionHeaderRow, { CSSStyles: { ...cssStyles.tableHeaderLayoutCss } }); - - this.posixPermissionsContainer = this.modelBuilder.flexContainer().withLayout({ flexFlow: 'column' }).component(); - contentContainer.addItem(this.posixPermissionsContainer, { flex: '0 0 auto', CSSStyles: { 'margin-bottom': '20px' } }); - - // =========================== - // = Add User Or Group Input = - // =========================== - - const addUserOrGroupTitle = this.modelBuilder.text() - .withProps({ value: loc.addUserOrGroupHeader, CSSStyles: { 'margin-block-start': '0px', 'margin-block-end': '10px' } }) - .component(); - contentContainer.addItem(addUserOrGroupTitle, { CSSStyles: { 'margin-top': '15px', ...cssStyles.titleCss } }); - - const typeContainer = this.modelBuilder.flexContainer().component(); - const aclEntryTypeGroup = 'aclEntryType'; - const userTypeButton = this.createRadioButton(this.modelBuilder, loc.userLabel, aclEntryTypeGroup, AclType.user); - const groupTypeButton = this.createRadioButton(this.modelBuilder, loc.groupLabel, aclEntryTypeGroup, AclType.group); - userTypeButton.checked = true; - this.addUserOrGroupSelectedType = AclType.user; - - typeContainer.addItems([userTypeButton, groupTypeButton], { flex: '0 0 auto' }); - contentContainer.addItem(typeContainer, { flex: '0 0 auto', CSSStyles: { 'margin-bottom': '5px' } }); - const addUserOrGroupInputRow = this.modelBuilder.flexContainer().component(); - - this.addUserOrGroupInput = this.modelBuilder.inputBox() - .withProps({ - inputType: 'text', - placeHolder: loc.enterNamePlaceholder, - width: 250, - stopEnterPropagation: true - }) - .component(); - this.addUserOrGroupInput.onEnterKeyPressed((value: string) => { - this.hdfsModel.createAndAddAclEntry(value, this.addUserOrGroupSelectedType); - this.addUserOrGroupInput.value = ''; - }); - const addUserOrGroupButton = this.modelBuilder.button().withProps({ - label: loc.addLabel, - width: 75, - secondary: true - }).component(); - addUserOrGroupButton.onDidClick(() => { - this.hdfsModel.createAndAddAclEntry(this.addUserOrGroupInput.value, this.addUserOrGroupSelectedType); - this.addUserOrGroupInput.value = ''; - }); - addUserOrGroupButton.enabled = false; // Init to disabled since we don't have any name entered in yet - this.addUserOrGroupInput.onTextChanged(() => { - addUserOrGroupButton.enabled = this.addUserOrGroupInput.value !== ''; - }); - - addUserOrGroupInputRow.addItem(this.addUserOrGroupInput, { flex: '0 0 auto' }); - addUserOrGroupInputRow.addItem(addUserOrGroupButton, { flex: '0 0 auto', CSSStyles: { 'margin-left': '20px' } }); - - contentContainer.addItem(addUserOrGroupInputRow, { flex: '0 0 auto', CSSStyles: { 'margin-bottom': '20px' } }); - - // ================================================= - // = Named Users and Groups permissions header row = - // ================================================= - - const 
namedUsersAndGroupsSectionsHeaderRow = this.createPermissionsSectionHeaderRow(permissionsDeleteColumnWidth, permissionsCheckboxColumnWidth); - contentContainer.addItem(namedUsersAndGroupsSectionsHeaderRow, { CSSStyles: { ...cssStyles.tableHeaderLayoutCss } }); - - this.namedUsersAndGroupsPermissionsContainer = this.modelBuilder.flexContainer() - .withLayout({ flexFlow: 'column' }) - .component(); - contentContainer.addItem(this.namedUsersAndGroupsPermissionsContainer, { flex: '1', CSSStyles: { 'overflow': 'scroll', 'min-height': '200px' } }); - this.viewInitialized = true; - this.onViewInitializedEvent.fire(); - } - - private handlePermissionStatusUpdated(permissionStatus: PermissionStatus): void { - if (!permissionStatus || !this.modelInitialized) { - return; - } - - // If this is the first time go through and create the UI components now that we have a model to use - if (!this.viewInitialized) { - this.initializeView(permissionStatus); - } - - this.eventuallyRunOnInitialized(() => { - this.stickyCheckbox.checked = permissionStatus.stickyBit; - if (this.hdfsModel.fileStatus.type === HdfsFileType.Directory) { - this.inheritDefaultsCheckbox.checked = - !permissionStatus.owner.getPermission(AclEntryScope.default) && - !permissionStatus.group.getPermission(AclEntryScope.default) && - !permissionStatus.other.getPermission(AclEntryScope.default); - } - - this.applyRecursivelyButton.hidden = this.hdfsModel.fileStatus.type !== HdfsFileType.Directory; - - this.posixPermissionsContainer.clearItems(); - - const posixPermissionData = [permissionStatus.owner, permissionStatus.group, permissionStatus.other].map(aclEntry => { - return this.createPermissionsTableRow(aclEntry, false/*includeDelete*/, false/*includeInherit*/); - }); - - const posixPermissionsNamesColumnWidth = 800 + (this.hdfsModel.fileStatus.type === HdfsFileType.Directory ? 0 : permissionsCheckboxColumnWidth * 3); - const namedUsersAndGroupsPermissionsNamesColumnWidth = 700 + (this.hdfsModel.fileStatus.type === HdfsFileType.Directory ? 
0 : permissionsCheckboxColumnWidth * 3); - - // Default set of columns that are always shown - let posixPermissionsColumns = [ - this.createTableColumn('', loc.userOrGroupIcon, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn('', loc.defaultUserAndGroups, posixPermissionsNamesColumnWidth, azdata.DeclarativeDataType.string), - this.createTableColumn(loc.readHeader, `${loc.accessHeader} ${loc.readHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.writeHeader, `${loc.accessHeader} ${loc.writeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.executeHeader, `${loc.accessHeader} ${loc.executeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component)]; - let namedUsersAndGroupsColumns = [ - this.createTableColumn('', loc.userOrGroupIcon, 50, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.namedUsersAndGroupsHeader, loc.namedUsersAndGroupsHeader, namedUsersAndGroupsPermissionsNamesColumnWidth, azdata.DeclarativeDataType.string), - this.createTableColumn(loc.readHeader, `${loc.accessHeader} ${loc.readHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.writeHeader, `${loc.accessHeader} ${loc.writeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.executeHeader, `${loc.accessHeader} ${loc.executeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component)]; - - // Additional columns that are only shown for directories - if (this.hdfsModel.fileStatus.type === HdfsFileType.Directory) { - posixPermissionsColumns = posixPermissionsColumns.concat([ - this.createTableColumn(loc.readHeader, `${loc.defaultHeader} ${loc.readHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.writeHeader, `${loc.defaultHeader} ${loc.writeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.executeHeader, `${loc.defaultHeader} ${loc.executeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component) - ]); - namedUsersAndGroupsColumns = namedUsersAndGroupsColumns.concat([ - this.createTableColumn(loc.inheritDefaultsLabel, loc.inheritDefaultsLabel, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.readHeader, `${loc.defaultHeader} ${loc.readHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.writeHeader, `${loc.defaultHeader} ${loc.writeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - this.createTableColumn(loc.executeHeader, `${loc.defaultHeader} ${loc.executeHeader}`, permissionsCheckboxColumnWidth, azdata.DeclarativeDataType.component), - ]); - } - namedUsersAndGroupsColumns.push(this.createTableColumn('', loc.deleteTitle, permissionsDeleteColumnWidth, azdata.DeclarativeDataType.component)); - - const posixPermissionsTable = this.modelBuilder.declarativeTable() - .withProps( - { - columns: posixPermissionsColumns, - data: posixPermissionData - }).component(); - - this.posixPermissionsContainer.addItem(posixPermissionsTable, { CSSStyles: { 'margin-right': '12px' } }); - - this.namedUsersAndGroupsPermissionsContainer.clearItems(); - - const namedUsersAndGroupsData = permissionStatus.aclEntries.map(aclEntry => { - return 
this.createPermissionsTableRow(aclEntry, true/*includeDelete*/, this.hdfsModel.fileStatus.type === HdfsFileType.Directory/*includeInherit*/); - }); - - const namedUsersAndGroupsTable = this.modelBuilder.declarativeTable() - .withProps( - { - columns: namedUsersAndGroupsColumns, - data: namedUsersAndGroupsData - }).component(); - - this.namedUsersAndGroupsPermissionsContainer.addItem(namedUsersAndGroupsTable); - - this.rootLoadingComponent.loading = false; - - void this.addUserOrGroupInput.focus(); - }); - } - - private createRadioButton(modelBuilder: azdata.ModelBuilder, label: string, name: string, aclEntryType: AclType): azdata.RadioButtonComponent { - const button = modelBuilder.radioButton().withProps({ label: label, name: name }).component(); - button.onDidClick(() => { - this.addUserOrGroupSelectedType = aclEntryType; - }); - return button; - } - - private createTableColumn(header: string, ariaLabel: string, width: number, type: azdata.DeclarativeDataType): azdata.DeclarativeTableColumn { - return { - displayName: header, - ariaLabel: ariaLabel, - valueType: type, - isReadOnly: true, - width: width, - headerCssStyles: { - 'border': 'none', - 'padding': '0px', - ...cssStyles.permissionsTableHeaderCss - }, - rowCssStyles: { - 'border-top': 'solid 1px #ccc', - 'border-bottom': 'solid 1px #ccc', - 'border-left': 'none', - 'border-right': 'none', - 'padding': '0px' - }, - }; - } - - private createImageComponent(type: AclType | PermissionType): azdata.ImageComponent { - const imageProperties = getImageForType(type); - return this.modelBuilder.image() - .withProps({ - iconPath: imageProperties.iconPath, - width: permissionsTypeIconColumnWidth, - height: permissionsRowHeight, - iconWidth: 20, - iconHeight: 20, - title: imageProperties.title - }).component(); - } - - private createPermissionsTableRow(aclEntry: AclEntry, includeDelete: boolean, includeInherit: boolean): any[] { - // Access Read - const accessReadComponents = createCheckbox(this.modelBuilder, aclEntry.getPermission(AclEntryScope.access).read, true, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.accessHeader} ${loc.readHeader}`); - accessReadComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.access).read = accessReadComponents.checkbox.checked; - }); - - // Access Write - const accessWriteComponents = createCheckbox(this.modelBuilder, aclEntry.getPermission(AclEntryScope.access).write, true, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.accessHeader} ${loc.writeHeader}`); - accessWriteComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.access).write = accessWriteComponents.checkbox.checked; - }); - - // Access Execute - const accessExecuteComponents = createCheckbox(this.modelBuilder, aclEntry.getPermission(AclEntryScope.access).execute, true, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.accessHeader} ${loc.executeHeader}`); - accessExecuteComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.access).execute = accessExecuteComponents.checkbox.checked; - }); - - const permissionsCheckboxesMapping: PermissionCheckboxesMapping = { - model: aclEntry, - access: { read: accessReadComponents.checkbox, write: accessWriteComponents.checkbox, execute: accessExecuteComponents.checkbox }, - default: { read: undefined, write: undefined, execute: undefined } - }; - - let row = [ - this.createImageComponent(aclEntry.type), - aclEntry.displayName, - accessReadComponents.container, - accessWriteComponents.container, - 
accessExecuteComponents.container - ]; - - // Default permissions can only be set on directories - if (this.hdfsModel.fileStatus.type === HdfsFileType.Directory) { - const defaultPermission = aclEntry.getPermission(AclEntryScope.default); - - // Default Read - const defaultReadCheckboxComponents = createCheckbox(this.modelBuilder, defaultPermission && defaultPermission.read, !!defaultPermission, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.defaultHeader} ${loc.readHeader}`); - defaultReadCheckboxComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.default).read = defaultReadCheckboxComponents.checkbox.checked; - }); - - // Default Write - const defaultWriteCheckboxComponents = createCheckbox(this.modelBuilder, defaultPermission && defaultPermission.write, !!defaultPermission, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.defaultHeader} ${loc.writeHeader}`); - defaultWriteCheckboxComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.default).write = defaultWriteCheckboxComponents.checkbox.checked; - }); - - // Default Execute - const defaultExecuteCheckboxComponents = createCheckbox(this.modelBuilder, defaultPermission && defaultPermission.execute, !!defaultPermission, permissionsCheckboxColumnWidth, permissionsRowHeight, `${loc.defaultHeader} ${loc.executeHeader}`); - defaultExecuteCheckboxComponents.checkbox.onChanged(() => { - aclEntry.getPermission(AclEntryScope.default).execute = defaultExecuteCheckboxComponents.checkbox.checked; - }); - - permissionsCheckboxesMapping.default = { read: defaultReadCheckboxComponents.checkbox, write: defaultWriteCheckboxComponents.checkbox, execute: defaultExecuteCheckboxComponents.checkbox }; - - if (includeInherit) { - const inheritCheckboxComponents = createCheckbox(this.modelBuilder, !defaultPermission, !this.inheritDefaultsCheckbox.checked, permissionsCheckboxColumnWidth, permissionsRowHeight, loc.inheritDefaultsLabel); - inheritCheckboxComponents.checkbox.onChanged(() => { - defaultReadCheckboxComponents.checkbox.enabled = !inheritCheckboxComponents.checkbox.checked; - defaultWriteCheckboxComponents.checkbox.enabled = !inheritCheckboxComponents.checkbox.checked; - defaultExecuteCheckboxComponents.checkbox.enabled = !inheritCheckboxComponents.checkbox.checked; - if (inheritCheckboxComponents.checkbox.checked) { - aclEntry.removePermission(AclEntryScope.default); - defaultReadCheckboxComponents.checkbox.checked = false; - defaultWriteCheckboxComponents.checkbox.checked = false; - defaultExecuteCheckboxComponents.checkbox.checked = false; - } else { - // Default to the access settings - this is what HDFS does if you don't - // specify the complete set of default ACLs for owner, owning group and other - const accessRead = accessReadComponents.checkbox.checked; - const accessWrite = accessWriteComponents.checkbox.checked; - const accessExecute = accessExecuteComponents.checkbox.checked; - defaultReadCheckboxComponents.checkbox.checked = accessRead; - defaultWriteCheckboxComponents.checkbox.checked = accessWrite; - defaultExecuteCheckboxComponents.checkbox.checked = accessExecute; - aclEntry.addPermission(AclEntryScope.default, - new AclEntryPermission(accessRead, accessWrite, accessExecute)); - } - }); - this.namedSectionInheritCheckboxes.push(inheritCheckboxComponents.checkbox); - row.push(inheritCheckboxComponents.container); - } - - this.posixPermissionCheckboxesMapping.push(permissionsCheckboxesMapping); - - row = row.concat([ - defaultReadCheckboxComponents.container, - 
defaultWriteCheckboxComponents.container, - defaultExecuteCheckboxComponents.container - ]); - } - - if (includeDelete) { - const deleteButton = this.modelBuilder.button() - .withProps( - { - label: '', - title: loc.deleteTitle, - iconPath: IconPathHelper.delete, - width: 20, - height: 20 - }) - .component(); - deleteButton.onDidClick(() => { this.hdfsModel.deleteAclEntry(aclEntry); }); - row.push(deleteButton); - } - - return row; - } - - private createInheritDefaultsCheckbox(): azdata.CheckBoxComponent { - this.inheritDefaultsCheckbox = this.modelBuilder.checkBox() - .withProps({ - width: checkboxSize, - height: checkboxSize, - checked: false, // Will be set when we get the model update - label: loc.inheritDefaultsLabel - }) - .component(); - - this.inheritDefaultsCheckbox.onChanged(() => { - if (this.inheritDefaultsCheckbox.checked) { - this.namedSectionInheritCheckboxes.forEach(c => { - c.enabled = false; - c.checked = true; - }); - } else { - this.namedSectionInheritCheckboxes.forEach(c => { - c.enabled = true; - c.checked = false; - }); - } - // Go through each of the rows for owner/owning group/other and update - // their checkboxes based on the new value of the inherit checkbox - this.posixPermissionCheckboxesMapping.forEach(m => { - m.default.read.enabled = !this.inheritDefaultsCheckbox.checked; - m.default.write.enabled = !this.inheritDefaultsCheckbox.checked; - m.default.execute.enabled = !this.inheritDefaultsCheckbox.checked; - if (this.inheritDefaultsCheckbox.checked) { - m.model.removePermission(AclEntryScope.default); - m.default.read.checked = false; - m.default.write.checked = false; - m.default.execute.checked = false; - } else { - // Default to the access settings - this is what HDFS does if you don't - // specify the complete set of default ACLs for owner, owning group and other - const accessRead = m.access.read.checked; - const accessWrite = m.access.write.checked; - const accessExecute = m.access.execute.checked; - m.default.read.checked = accessRead; - m.default.write.checked = accessWrite; - m.default.execute.checked = accessExecute; - m.model.addPermission(AclEntryScope.default, new AclEntryPermission(accessRead, accessWrite, accessExecute)); - } - }); - }); - return this.inheritDefaultsCheckbox; - } - /** - * Creates the header row for the permissions tables. This contains headers for the name and read/write/execute for the - * access section. If the path is for a directory then a default section is included for specifying default permissions. 
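For reviewers unfamiliar with the dialog being removed above: when "Inherit Defaults" is unchecked, the code seeds the default scope from the current access bits, because HDFS expects a complete default ACL set for owner, owning group and other once any default entry is set. A minimal sketch of that rule, using simplified stand-ins (`Scope`, `Permission`, `Entry` are illustrative, not the extension's real `AclEntry` types):

```typescript
// Simplified stand-ins for the extension's AclEntryScope/AclEntryPermission model.
enum Scope { Access = 'access', Default = 'default' }

interface Permission { read: boolean; write: boolean; execute: boolean; }

class Entry {
    private permissions = new Map<Scope, Permission>();
    get(scope: Scope): Permission | undefined { return this.permissions.get(scope); }
    set(scope: Scope, p: Permission): void { this.permissions.set(scope, p); }
    remove(scope: Scope): void { this.permissions.delete(scope); }
}

// When "Inherit Defaults" is turned off, copy the access bits into the
// default scope as a starting point; when it is turned on, drop the
// default scope entirely so the entry inherits from its parent.
function setInherit(entry: Entry, inherit: boolean): void {
    if (inherit) {
        entry.remove(Scope.Default);
    } else {
        const access = entry.get(Scope.Access) ?? { read: false, write: false, execute: false };
        entry.set(Scope.Default, { ...access });
    }
}
```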
- * @param rightSpacerWidth The amount of space to include on the right to correctly align the headers with the table - * @param middleSpacerWidth The amount of space to include between the text to correctly align the headers with the table sections - */ - private createPermissionsSectionHeaderRow(rightSpacerWidth: number, middleSpacerWidth: number): azdata.FlexContainer { - // Section Headers - const sectionHeaderContainer = this.modelBuilder.flexContainer().withLayout({ flexFlow: 'row', justifyContent: 'flex-end' }).component(); - - // Access - const accessSectionHeader = this.modelBuilder.text() - .withProps({ - value: loc.accessHeader, - ariaHidden: true, - CSSStyles: { - // This covers 3 checkbox columns - 'width': `${permissionsCheckboxColumnWidth * 3}px`, - 'min-width': `${permissionsCheckboxColumnWidth * 3}px`, - ...cssStyles.permissionsTableHeaderCss - } - }) - .component(); - sectionHeaderContainer.addItem(accessSectionHeader, { flex: '0 0 auto' }); - - // Only show default section for directories - if (this.hdfsModel.fileStatus.type === HdfsFileType.Directory) { - // Middle spacer - const middleSpacer = this.modelBuilder.text().withProps({ CSSStyles: { 'width': `${middleSpacerWidth}px`, 'min-width': `${middleSpacerWidth}px` } }).component(); - sectionHeaderContainer.addItem(middleSpacer, { flex: '0 0 auto' }); - - // Default - const defaultSectionHeader = this.modelBuilder.text() - .withProps({ - value: loc.defaultHeader, - ariaHidden: true, - CSSStyles: { - // This covers 3 checkbox columns - 'width': `${permissionsCheckboxColumnWidth * 3}px`, - 'min-width': `${permissionsCheckboxColumnWidth * 3}px`, - ...cssStyles.permissionsTableHeaderCss - } - }) - .component(); - sectionHeaderContainer.addItem(defaultSectionHeader, { flex: '0 0 auto' }); - } - - // Right spacer - const rightSpacer = this.modelBuilder.text().withProps({ CSSStyles: { 'width': `${rightSpacerWidth}px`, 'min-width': `${rightSpacerWidth}px` } }).component(); - sectionHeaderContainer.addItem(rightSpacer, { flex: '0 0 auto' }); - - return sectionHeaderContainer; - } - - /** - * Runs the specified action when the component is initialized. If already initialized just runs - * the action immediately. 
- * @param action The action to be ran when the page is initialized - */ - protected eventuallyRunOnInitialized(action: () => void): void { - if (!this.viewInitialized) { - this.onViewInitializedEvent.event(() => { - try { - action(); - } catch (error) { - console.error(`Unexpected error running onInitialized action for Manage Access dialog : ${error}`); - } - }); - } else { - action(); - } - } -} - -/** - * Creates a checkbox to be hosted inside of a table cell - * @param builder The ModelBuilder used to create the components - * @param checked Whether the checkbox is initially checked or not - * @param enabled Whether the checkbox is initially enabled or not - * @param containerWidth The width of the container holding the checkbox - * @param containerHeight The height of the container holding the checkbox - * @param ariaLabel The aria label to apply to the checkbox - */ -function createCheckbox(builder: azdata.ModelBuilder, checked: boolean, enabled: boolean, containerWidth: number, containerHeight: number, ariaLabel: string): { container: azdata.FlexContainer, checkbox: azdata.CheckBoxComponent } { - const checkbox = builder.checkBox() - .withProps({ - checked: checked, - enabled: enabled, - height: checkboxSize, - width: checkboxSize, - ariaLabel: ariaLabel - }).component(); - const container = builder.flexContainer() - .withLayout({ width: containerWidth, height: containerHeight }) - .component(); - container.addItem(checkbox, { CSSStyles: { ...cssStyles.permissionCheckboxCss } }); - return { - container: container, - checkbox: checkbox - }; -} diff --git a/extensions/mssql/src/hdfs/ui/uiConstants.ts b/extensions/mssql/src/hdfs/ui/uiConstants.ts deleted file mode 100644 index a99d3dbade..0000000000 --- a/extensions/mssql/src/hdfs/ui/uiConstants.ts +++ /dev/null @@ -1,13 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
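The `eventuallyRunOnInitialized` helper above is a run-now-or-defer pattern. A self-contained sketch of the same idea using Node's EventEmitter in place of the vscode Event the dialog uses (class and member names are illustrative):

```typescript
import { EventEmitter } from 'events';

class Initializable {
    private initialized = false;
    private readonly emitter = new EventEmitter();

    markInitialized(): void {
        this.initialized = true;
        this.emitter.emit('initialized');
    }

    // Run the action immediately if ready, otherwise queue it for the
    // initialization event, logging (but not rethrowing) action errors.
    eventuallyRun(action: () => void): void {
        if (this.initialized) {
            action();
            return;
        }
        this.emitter.once('initialized', () => {
            try {
                action();
            } catch (err) {
                console.error(`Unexpected error running deferred action: ${err}`);
            }
        });
    }
}
```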
- *--------------------------------------------------------------------------------------------*/ - -export namespace cssStyles { - export const tableBorderCss = '1px solid #ccc'; - export const titleCss = { 'font-size': '20px', 'font-weight': '600', 'margin-block-end': '0px', 'margin-block-start': '0px' }; - export const tableHeaderCss = { 'font-weight': 'bold', 'text-transform': 'uppercase', 'font-size': '10px', 'user-select': 'text' }; - export const permissionsTableHeaderCss = { ...tableHeaderCss, 'text-align': 'center' }; - export const permissionCheckboxCss = { 'margin-top': '5px', 'margin-left': '13px' }; - export const tableHeaderLayoutCss = { 'padding-left': '10px', 'box-sizing': 'border-box', 'user-select': 'text', 'margin-right': '12px' }; -} diff --git a/extensions/mssql/src/hdfs/webhdfs.ts b/extensions/mssql/src/hdfs/webhdfs.ts deleted file mode 100644 index 73c1098a1b..0000000000 --- a/extensions/mssql/src/hdfs/webhdfs.ts +++ /dev/null @@ -1,995 +0,0 @@ -// This code is originally from https://github.com/harrisiirak/webhdfs -// License: https://github.com/harrisiirak/webhdfs/blob/master/LICENSE - -import * as url from 'url'; -import * as fs from 'fs'; -import * as querystring from 'querystring'; -import * as request from 'request'; -import * as BufferStreamReader from 'buffer-stream-reader'; -import { Cookie } from 'tough-cookie'; -import * as through from 'through2'; -import * as nls from 'vscode-nls'; -import * as auth from '../util/auth'; -import { IHdfsOptions, IRequestParams, FileType } from '../objectExplorerNodeProvider/fileSources'; -import { PermissionStatus, AclEntry, parseAclList, PermissionType, parseAclPermissionFromOctal, AclEntryScope, AclType } from './aclEntry'; -import { Mount } from './mount'; -import { everyoneName, ownerPostfix, owningGroupPostfix } from '../localizedConstants'; -import { FileStatus, parseHdfsFileType } from './fileStatus'; -import { Readable, Transform } from 'stream'; - -const localize = nls.loadMessageBundle(); -const ErrorMessageInvalidDataStructure = localize('webhdfs.invalidDataStructure', "Invalid Data Structure"); - -const emitError = (instance: request.Request | Transform, err: any) => { - const isErrorEmitted = (instance as any).errorEmitted; - - if (!isErrorEmitted) { - instance.emit('error', err); - instance.emit('finish'); - } - - (instance as any).errorEmitted = true; -}; - -export class WebHDFS { - private _requestParams: IRequestParams; - private _opts: IHdfsOptions; - private _url: any; - private _authCookie: Cookie; - constructor(opts: IHdfsOptions, requestParams: IRequestParams) { - if (!(this instanceof WebHDFS)) { - return new WebHDFS(opts, requestParams); - } - - let missingProps = ['host', 'port', 'path'] - .filter((p: keyof IHdfsOptions) => !opts.hasOwnProperty(p) || !opts[p]); - if (missingProps && missingProps.length > 0) { - throw new Error(localize('webhdfs.missingProperties', - "Unable to create WebHDFS client due to missing options: ${0}", missingProps.join(', '))); - } - - this._requestParams = requestParams || {}; - this._requestParams.timeout = this._requestParams.timeout || 10000; - - this._opts = opts; - this._url = { - protocol: opts.protocol || 'http', - hostname: opts.host.trim(), - port: opts.port || 80, - pathname: opts.path - }; - } - - private checkArgDefined(argName: string, argValue: any): void { - if (!argValue) { - throw new Error(localize('webhdfs.undefinedArgument', "'${0}' is undefined.", argName)); - } - } - - /** - * Generate WebHDFS REST API endpoint URL for given operation - * - * 
@param operation WebHDFS operation name - * @returns WebHDFS REST API endpoint URL - */ - private getOperationEndpoint(operation: string, path: string, params?: object): string { - let endpoint = this._url; - endpoint.pathname = encodeURI(this._opts.path + path); - let searchOpts = Object.assign( - { 'op': operation }, - this._opts.user ? { 'user.name': this._opts.user } : {}, - params || {} - ); - endpoint.search = querystring.stringify(searchOpts); - return url.format(endpoint); - } - - /** - * Gets localized status message for given status code - * - * @param statusCode Http status code - * @returns status message - */ - private toStatusMessage(statusCode: number): string { - let statusMessage: string = undefined; - switch (statusCode) { - case 400: statusMessage = localize('webhdfs.httpError400', "Bad Request"); break; - case 401: statusMessage = localize('webhdfs.httpError401', "Unauthorized"); break; - case 403: statusMessage = localize('webhdfs.httpError403', "Forbidden"); break; - case 404: statusMessage = localize('webhdfs.httpError404', "Not Found"); break; - case 500: statusMessage = localize('webhdfs.httpError500', "Internal Server Error"); break; - // TODO: define more messages here - default: break; - } - return statusMessage; - } - - /** - * Gets status message from response - * - * @param response response object - * @returns Error message interpreted by status code - */ - private getStatusMessage(response: request.Response): string { - if (!response) { return undefined; } - let statusMessage: string = this.toStatusMessage(response.statusCode) - || (response && response.statusMessage); - return statusMessage; - } - - /** - * Gets remote exception message from response body - * - * @param responseBody response body - * @returns Error message interpreted by status code - */ - private getRemoteExceptionMessage(responseBody: any): string { - if (!responseBody) { return undefined; } - if (typeof responseBody === 'string') { - try { - responseBody = JSON.parse(responseBody); - } catch { } - } - let remoteExceptionMessage: string = undefined; - if (responseBody.hasOwnProperty('RemoteException') - && responseBody.RemoteException.hasOwnProperty('message')) { - remoteExceptionMessage = responseBody.RemoteException.message; - } - return remoteExceptionMessage; - } - - /** - * Generates error message descriptive as much as possible - * - * @param statusMessage status message - * @param [remoteExceptionMessage] remote exception message - * @param [error] error - * @returns error message - */ - private getErrorMessage(statusMessage: string, remoteExceptionMessage?: string, error?: any): string { - statusMessage = statusMessage === '' ? undefined : statusMessage; - remoteExceptionMessage = remoteExceptionMessage === '' ? undefined : remoteExceptionMessage; - let messageFromError: string = error ? (error['message'] || error.toString()) : undefined; - return statusMessage && remoteExceptionMessage ? 
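`getOperationEndpoint` builds URLs of the shape `http://host:port/webhdfs/v1/<path>?op=<operation>&user.name=<user>&...`. A hedged sketch of the same construction with the WHATWG URL API instead of the deprecated `url.format`/`querystring` pair (all values illustrative):

```typescript
function operationEndpoint(
    base: { protocol: string; host: string; port: number; root: string },
    operation: string,
    hdfsPath: string,
    params: Record<string, string> = {},
    user?: string
): string {
    const endpoint = new URL(`${base.protocol}://${base.host}:${base.port}`);
    endpoint.pathname = encodeURI(base.root + hdfsPath);
    endpoint.searchParams.set('op', operation);
    if (user) { endpoint.searchParams.set('user.name', user); }
    for (const [key, value] of Object.entries(params)) {
        endpoint.searchParams.set(key, value);
    }
    return endpoint.toString();
}

// e.g. http://nn:50070/webhdfs/v1/data?op=liststatus&user.name=admin
console.log(operationEndpoint(
    { protocol: 'http', host: 'nn', port: 50070, root: '/webhdfs/v1' },
    'liststatus', '/data', {}, 'admin'));
```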
- `${statusMessage} (${remoteExceptionMessage})` : - statusMessage || remoteExceptionMessage || messageFromError || - localize('webhdfs.unknownError', "Unknown Error"); - } - - /** - * Parse error state from response and return valid Error object - * - * @param response response object - * @param [responseBody] response body - * @param [error] error - * @returns HdfsError object - */ - private parseError(response: request.Response, responseBody?: any, error?: any): HdfsError { - let statusMessage: string = this.getStatusMessage(response); - if (!responseBody && response) { - responseBody = response.body; - } - let remoteExceptionMessage: string = this.getRemoteExceptionMessage(responseBody); - let errorMessage: string = this.getErrorMessage(statusMessage, remoteExceptionMessage, error); - return new HdfsError(errorMessage, response && response.statusCode, - response && response.statusMessage, remoteExceptionMessage, error); - } - - /** - * Check if response is redirect - * - * @param response response object - * @returns if response is redirect - */ - private isRedirect(response: request.Response): boolean { - return [301, 307].indexOf(response.statusCode) !== -1 && - response.headers.hasOwnProperty('location'); - } - - /** - * Check if response is successful - * - * @param response response object - * @returns if response is successful - */ - private isSuccess(response: request.Response): boolean { - return [200, 201].indexOf(response.statusCode) !== -1; - } - - /** - * Check if response is error - * - * @param response response object - * @returns if response is error - */ - private isError(response: request.Response): boolean { - return [400, 401, 402, 403, 404, 500].indexOf(response.statusCode) !== -1; - } - - /** - * Send a request to WebHDFS REST API - * - * @param method HTTP method - * @param urlValue - * @param opts Options for request - * @returns void - */ - private sendRequest(method: string, urlValue: string, opts: object, callback: (error: HdfsError, response: request.Response) => void): void { - if (!callback) { - return; - } - let requestParams = Object.assign( - { method: method, url: urlValue, json: true }, - this._requestParams, - opts || {} - ); - this.ensureCookie(requestParams); - // Add a wrapper to handle unauthorized requests by adding kerberos auth steps - let handler = (error: any, response: request.Response) => { - if (error && error.statusCode === 401 && this._requestParams.isKerberos) { - this.requestWithKerberosSync(requestParams, callback); - } else { - callback(error, response); - } - }; - this.doSendRequest(requestParams, handler); - } - - private ensureCookie(requestParams: { headers?: { [key: string]: string } }) { - if (this._authCookie && this._authCookie.expiryTime() > Date.now()) { - requestParams.headers = requestParams.headers || {}; - requestParams.headers['cookie'] = `${this._authCookie.key}=${this._authCookie.value}`; - } - } - - private doSendRequest(requestParams: any, callback: (error: HdfsError, response: any) => void): void { - request(requestParams, (error: any, response: request.Response, body: any) => { - if (error || this.isError(response)) { - let hdfsError = this.parseError(response, body, error); - callback(hdfsError, response); - } - else if (this.isSuccess(response)) { - callback(undefined, response); - } - else { - let hdfsError = new HdfsError(localize('webhdfs.unexpectedRedirect', "Unexpected Redirect"), response && response.statusCode, response && response.statusMessage, this.getRemoteExceptionMessage(body || response.body), 
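`sendRequest` wraps every call so that a 401 on a Kerberos-enabled cluster triggers a single authenticated retry carrying a SPNEGO `Negotiate` token. A simplified sketch of that control flow using the global `fetch` available in Node 18+; the `getToken` parameter is a stand-in for the extension's `auth.authenticateKerberos`:

```typescript
async function requestWithNegotiateRetry(
    url: string,
    getToken: () => Promise<string>  // stand-in for auth.authenticateKerberos(host)
): Promise<Response> {
    const first = await fetch(url);
    if (first.status !== 401) {
        return first;
    }
    // One authenticated retry; a second 401 is surfaced to the caller.
    const token = await getToken();
    return fetch(url, { headers: { Authorization: `Negotiate ${token}` } });
}
```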
error); - callback(hdfsError, response); - } - }); - } - - /** - * Authenticates using kerberos as part of a request, and saves cookie if successful. - * Ideally would use request's built-in cookie functionality but this isn't working with non-public domains. - * Instead, save the cookie in this module and reuse if not expired - */ - private requestWithKerberosSync(requestParams: any, callback: (error: HdfsError, response: request.Response) => void) { - this.setKerberosAuthOnParams(requestParams).then(() => { - this.doSendRequest(requestParams, (error, response) => { - if (error) { - // Pass on the callback - callback(error, response); - } - else { - // Capture cookie for future requests - this.setAuthCookie(response); - callback(error, response); - } - }); - }).catch((err) => { - callback(err, undefined); - }); - } - - private async setKerberosAuthOnParams(requestParams: any): Promise { - let kerberosToken = await auth.authenticateKerberos(this._opts.host); - requestParams.headers = { Authorization: `Negotiate ${kerberosToken}` }; - return requestParams; - } - - private setAuthCookie(response: request.Response) { - try { - if (response && response.headers && response.headers['set-cookie']) { - let cookies: Cookie[]; - if (response.headers['set-cookie'] instanceof Array) { - cookies = response.headers['set-cookie'].map(c => Cookie.parse(c)); - } - else { - cookies = [Cookie.parse(response.headers['set-cookie'])]; - } - this._authCookie = cookies[0]; - } - } catch { } - } - - /** - * Change file permissions - * @returns void - */ - public chmod(path: string, mode: string, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - this.checkArgDefined('mode', mode); - - let endpoint = this.getOperationEndpoint('setpermission', path, { permission: mode }); - this.sendRequest('PUT', endpoint, undefined, (error) => { - return callback && callback(error); - }); - } - - /** - * Change file owner - * - * @param path - * @param userId User name - * @param groupId Group name - * @param callback - * @returns void - */ - public chown(path: string, userId: string, groupId: string, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - this.checkArgDefined('userId', userId); - this.checkArgDefined('groupId', groupId); - - let endpoint = this.getOperationEndpoint('setowner', path, { - owner: userId, - group: groupId - }); - - this.sendRequest('PUT', endpoint, undefined, (error) => { - if (callback) { - callback(error); - } - }); - } - - /** - * List the status of a path - * - * @returns void - */ - public listStatus(path: string, callback: (error: HdfsError, files: FileStatus[]) => void): void { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('liststatus', path); - this.sendRequest('GET', endpoint, undefined, (error, response) => { - if (!callback) { return; } - - let files: any[] = []; - if (error) { - callback(error, undefined); - } else if (response.body.hasOwnProperty('FileStatuses') - && response.body.FileStatuses.hasOwnProperty('FileStatus')) { - files = (response.body.FileStatuses.FileStatus).map(fs => { - return new FileStatus( - fs.accessTime || '', - fs.blockSize || '', - fs.group || '', - fs.length || '', - fs.modificationTime || '', - fs.owner || '', - fs.pathSuffix || '', - fs.permission || '', - fs.replication || '', - fs.snapshotEnabled || '', - parseHdfsFileType(fs.type) - ); - }); - callback(undefined, files); - } else { - callback(new HdfsError(ErrorMessageInvalidDataStructure), undefined); - } 
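`listStatus` depends on Hadoop's documented LISTSTATUS JSON layout, `{ FileStatuses: { FileStatus: [...] } }`, and reports "Invalid Data Structure" when that shape is absent. A sketch of the same defensive parsing, with the field list trimmed to the essentials:

```typescript
// Mirrors the documented WebHDFS LISTSTATUS response layout.
interface ListStatusResponse {
    FileStatuses?: {
        FileStatus?: Array<{
            pathSuffix: string;
            type: 'FILE' | 'DIRECTORY' | 'SYMLINK';
            owner: string;
            group: string;
            permission: string;       // octal string, e.g. "755"
            length: number;
            modificationTime: number;
        }>;
    };
}

function parseListStatus(body: ListStatusResponse): Array<{ name: string; isDirectory: boolean }> {
    const entries = body.FileStatuses?.FileStatus;
    if (!entries) {
        // Mirrors the client's ErrorMessageInvalidDataStructure path.
        throw new Error('Invalid Data Structure');
    }
    return entries.map(e => ({ name: e.pathSuffix, isDirectory: e.type === 'DIRECTORY' }));
}
```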
- }); - } - - /** - * Make new directory - * @returns void - */ - public mkdir(path: string, permission: string = '0755', callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('mkdirs', path, { - permission: permission - }); - - this.sendRequest('PUT', endpoint, undefined, (error) => { - if (callback) { - callback(error); - } - }); - } - - /** - * Rename path - * @returns void - */ - public rename(path: string, destination: string, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - this.checkArgDefined('destination', destination); - - let endpoint = this.getOperationEndpoint('rename', path, { - destination: destination - }); - - this.sendRequest('PUT', endpoint, undefined, (error) => { - if (callback) { - callback(error); - } - }); - } - - public getFileStatus(path: string, callback: (error: HdfsError, fileStatus: FileStatus) => void): void { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('getfilestatus', path); - this.sendRequest('GET', endpoint, undefined, (error, response) => { - if (!callback) { return; } - if (error) { - callback(error, undefined); - } else if (response.body.hasOwnProperty('FileStatus')) { - const fileStatus = new FileStatus( - response.body.FileStatus.accessTime || '', - response.body.FileStatus.blockSize || '', - response.body.FileStatus.group || '', - response.body.FileStatus.length || '', - response.body.FileStatus.modificationTime || '', - response.body.FileStatus.owner || '', - response.body.FileStatus.pathSuffix || '', - response.body.FileStatus.permission || '', - response.body.FileStatus.replication || '', - response.body.FileStatus.snapshotEnabled || '', - parseHdfsFileType(response.body.FileStatus.type || 'undefined') - ); - callback(undefined, fileStatus); - } else { - callback(new HdfsError(ErrorMessageInvalidDataStructure), undefined); - } - }); - } - - /** - * Get ACL status for given path - * @param path The path to the file/folder to get the status of - * @param callback Callback to handle the response - * @returns void - */ - public getAclStatus(path: string, callback: (error: HdfsError, permissionStatus: PermissionStatus) => void): void { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('getaclstatus', path); - this.sendRequest('GET', endpoint, undefined, (error, response) => { - if (!callback) { return; } - if (error) { - callback(error, undefined); - } else if (response.body.hasOwnProperty('AclStatus')) { - const permissions = parseAclPermissionFromOctal(response.body.AclStatus.permission); - const ownerEntry = new AclEntry(PermissionType.owner, '', `${response.body.AclStatus.owner || ''}${ownerPostfix}`); - ownerEntry.addPermission(AclEntryScope.access, permissions.owner); - const groupEntry = new AclEntry(PermissionType.group, '', `${response.body.AclStatus.group || ''}${owningGroupPostfix}`); - groupEntry.addPermission(AclEntryScope.access, permissions.group); - const otherEntry = new AclEntry(PermissionType.other, '', everyoneName); - otherEntry.addPermission(AclEntryScope.access, permissions.other); - const parsedEntries = parseAclList((response.body.AclStatus.entries).join(',')); - - // First go through and apply any ACLs for the unnamed entries (which correspond to the permissions in - // the permission octal) - parsedEntries.filter(e => e.name === '').forEach(e => { - let targetEntry: AclEntry; - switch (e.type) { - case AclType.user: - targetEntry = ownerEntry; 
- break; - case AclType.group: - targetEntry = groupEntry; - break; - case AclType.other: - targetEntry = otherEntry; - break; - default: - // Unknown type - just ignore since we don't currently support the other types - return; - } - e.getAllPermissions().forEach(sp => { - targetEntry.addPermission(sp.scope, sp.permission); - }); - }); - - const permissionStatus = new PermissionStatus( - ownerEntry, - groupEntry, - otherEntry, - !!response.body.AclStatus.stickyBit, - // We filter out empty names here since those have already been merged into the - // owner/owning group/other entries - parsedEntries.filter(e => e.name !== '')); - callback(undefined, permissionStatus); - } else { - callback(new HdfsError(ErrorMessageInvalidDataStructure), undefined); - } - }); - } - - /** - * Set ACL for the given path. The owner, group and other fields are required - other entries are optional. - * @param path The path to the file/folder to set the ACL on - * @param fileType The type of file we're setting to determine if defaults should be applied. Use undefined if type is unknown - * @param permissionStatus The status containing the permissions to set - * @param callback Callback to handle the response - */ - public setAcl(path: string, fileType: FileType | undefined, permissionStatus: PermissionStatus, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - this.checkArgDefined('permissionStatus', permissionStatus); - const concatEntries = [permissionStatus.owner, permissionStatus.group, permissionStatus.other].concat(permissionStatus.aclEntries); - const aclSpec = concatEntries.reduce((acc, entry: AclEntry) => acc.concat(entry.toAclStrings(fileType !== FileType.File)), []).join(','); - let endpoint = this.getOperationEndpoint('setacl', path, { aclspec: aclSpec }); - this.sendRequest('PUT', endpoint, undefined, (error) => { - return callback && callback(error); - }); - } - - /** - * Sets the permission octal (sticky, owner, group & other) for a file/folder - * @param path The path to the file/folder to set the permission of - * @param permissionStatus The status containing the permission to set - * @param callback Callback to handle the response - */ - public setPermission(path: string, permissionStatus: PermissionStatus, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - this.checkArgDefined('permissionStatus', permissionStatus); - let endpoint = this.getOperationEndpoint('setpermission', path, { permission: permissionStatus.permissionOctal }); - this.sendRequest('PUT', endpoint, undefined, (error) => { - return callback && callback(error); - }); - } - - /** - * Removes the default ACLs for the specified path - * @param path The path to remove the default ACLs for - * @param callback Callback to handle the response - */ - public removeDefaultAcl(path: string, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - let endpoint = this.getOperationEndpoint('removedefaultacl', path); - this.sendRequest('PUT', endpoint, undefined, (error) => { - return callback && callback(error); - }); - } - - /** - * Get all mounts for a HDFS connection - * @param callback Callback to handle the response - * @returns void - */ - public getMounts(callback: (error: HdfsError, mounts: Mount[]) => void): void { - let endpoint = this.getOperationEndpoint('listmounts', ''); - this.sendRequest('GET', endpoint, undefined, (error, response) => { - if (!callback) { return; } - if (error) { - callback(error, undefined); - } else if 
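`setAcl` flattens the entries into the comma-separated `aclspec` string the SETACL operation expects, where named entries carry a name segment and default-scope entries are prefixed with `default:`. A minimal sketch, assuming simplified stand-ins for the extension's AclEntry model:

```typescript
interface SimpleAcl {
    scope: 'access' | 'default';
    type: 'user' | 'group' | 'other';
    name: string;                 // '' for the unnamed owner/group/other entries
    read: boolean; write: boolean; execute: boolean;
}

function toAclSpec(entries: SimpleAcl[]): string {
    return entries.map(e => {
        const rwx = `${e.read ? 'r' : '-'}${e.write ? 'w' : '-'}${e.execute ? 'x' : '-'}`;
        const base = `${e.type}:${e.name}:${rwx}`;
        return e.scope === 'default' ? `default:${base}` : base;
    }).join(',');
}

console.log(toAclSpec([
    { scope: 'access', type: 'user', name: '', read: true, write: true, execute: true },
    { scope: 'access', type: 'user', name: 'bob', read: true, write: true, execute: false },
]));  // user::rwx,user:bob:rw-
```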
(response.body.hasOwnProperty('Mounts')) { - const mounts = response.body.Mounts; - callback(undefined, mounts); - } else { - callback(new HdfsError(ErrorMessageInvalidDataStructure), undefined); - } - }); - } - - /** - * Check file existence - * Wraps stat method - * - * @see WebHDFS.stat - * @returns void - */ - public exists(path: string, callback: (error: HdfsError, exists: boolean) => void): void { - this.checkArgDefined('path', path); - - this.listStatus(path, (error, fileStatus) => { - let exists = !fileStatus ? false : true; - callback(error, exists); - }); - } - - /** - * Write data to the file - * - * @param path - * @param data - * @param append If set to true then append data to the file - * @param opts - * @param callback - */ - public writeFile(path: string, data: string | Buffer, append: boolean, opts: object, - callback: (error: HdfsError) => void): fs.WriteStream { - this.checkArgDefined('path', path); - this.checkArgDefined('data', data); - - let error: HdfsError = null; - let localStream = new BufferStreamReader(data); - let remoteStream: fs.WriteStream = this.createWriteStream(path, !!append, opts || {}); - - // Handle events - remoteStream.once('error', (err) => { - error = err; - }); - - remoteStream.once('finish', () => { - if (callback && error) { - callback(error); - } - }); - - localStream.pipe(remoteStream); // Pipe data - return remoteStream; - } - - /** - * Append data to the file - * - * @see writeFile - */ - public appendFile(path: string, data: string | Buffer, opts: object, callback: (error: HdfsError) => void): fs.WriteStream { - return this.writeFile(path, data, true, opts, callback); - } - - /** - * Read data from the file - * - * @fires Request#data - * @fires WebHDFS#finish - * @returns void - */ - public readFile(path: string, callback: (error: HdfsError, buffer: Buffer) => void): void { - this.checkArgDefined('path', path); - - let remoteFileStream = this.createReadStream(path); - let data: any[] = []; - let error: HdfsError = undefined; - - remoteFileStream.once('error', (err) => { - error = err; - }); - - remoteFileStream.on('data', (dataChunk) => { - data.push(dataChunk); - }); - - remoteFileStream.once('finish', () => { - if (!callback) { return; } - if (!error) { - callback(undefined, Buffer.concat(data)); - } else { - callback(error, undefined); - } - }); - } - - /** - * Create writable stream for given path - * - * @fires WebHDFS#finish - * @param path - * @param append If set to true then append data to the file - * @param opts - * @example - * let hdfs = WebHDFS.createClient(); - * - * let localFileStream = hdfs.createReadStream('/path/to/local/file'); - * let remoteFileStream = hdfs.createWriteStream('/path/to/remote/file'); - * - * localFileStream.pipe(remoteFileStream); - * - * remoteFileStream.on('error', (err) => { - * // Do something with the error - * }); - * - * remoteFileStream.on('finish', () => { - * // Upload is done - * }); - */ - public createWriteStream(path: string, append?: boolean, opts?: object): fs.WriteStream { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint( - append ? 'append' : 'create', - path, - Object.assign( - { - overwrite: true, - permission: '0755' - }, - opts || {} - ) - ); - - let params: any = Object.assign( - { - method: append ? 
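`readFile` follows the usual collect-then-concat pattern: buffer each 'data' chunk and assemble a single Buffer once the stream completes (the client keys off the request's 'finish' event; a plain Readable signals 'end'). A generic sketch:

```typescript
import { Readable } from 'stream';

// Collect every chunk and concatenate once the stream ends; errors reject.
function bufferStream(stream: Readable): Promise<Buffer> {
    return new Promise((resolve, reject) => {
        const chunks: Buffer[] = [];
        stream.on('data', chunk => chunks.push(Buffer.from(chunk)));
        stream.once('error', reject);
        stream.once('end', () => resolve(Buffer.concat(chunks)));
    });
}
```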
'POST' : 'PUT', - url: endpoint, - json: true, - }, - this._requestParams - ); - params.headers = params.headers || {}; - params.headers['content-type'] = 'application/octet-stream'; - - if (!this._requestParams.isKerberos) { - return this.doCreateWriteStream(params); - } - // Else, must add kerberos token and handle redirects - return this.createKerberosWriteStream(params); - } - - private createKerberosWriteStream(params: any): fs.WriteStream { - params.followRedirect = false; - // Create an intermediate stream that pauses until we get a positive - // response from the server - let isWaiting = true; - let firstCb: Function = undefined; - let replyStream = through(function (chunk, enc, cb) { - this.push(chunk, enc); - if (isWaiting) { - firstCb = cb; - } else { - cb(); - } - }); - let handleErr = (err: any) => { - replyStream.emit('error', err); - replyStream.end(); - }; - let initRedirectedStream = () => { - // After redirect, create valid stream to correct location - // and pipe the intermediate stream to it, unblocking the data flow - params.headers['content-type'] = 'application/octet-stream'; - let upload = request(params, (err: any, res: request.Response, bo: any) => { - if (err || this.isError(res)) { - emitError(replyStream, this.parseError(res, bo, err)); - replyStream.end(); - } - else if (res.headers.hasOwnProperty('location')) { - replyStream.emit('finish', res.headers.location); - } - else { - replyStream.emit('finish'); - } - }); - isWaiting = false; - replyStream.pipe(upload); - if (firstCb) { - firstCb(); - } - }; - this.requestWithRedirectAndAuth(params, initRedirectedStream, handleErr); - return replyStream; - } - - private doCreateWriteStream(params: any): fs.WriteStream { - - let canResume: boolean = true; - let stream: Readable; - let req = request(params, (error: any, response: request.Response, body: any) => { - // Handle redirect only if there was not an error (e.g. 
res is defined) - if (response && this.isRedirect(response)) { - let upload = request(Object.assign(params, { url: response.headers.location }), (err: any, res: request.Response, bo: any) => { - if (err || this.isError(res)) { - emitError(req, this.parseError(res, bo, err)); - req.end(); - } - else if (res.headers.hasOwnProperty('location')) { - req.emit('finish', res.headers.location); - } - else { - req.emit('finish'); - } - }); - canResume = true; // Enable resume - stream.pipe(upload); - stream.resume(); - } - if (error || this.isError(response)) { - emitError(req, this.parseError(response, body, error)); - } - }); - req.on('pipe', (src: Readable) => { - // Pause read stream - stream = src; - stream.pause(); - // This is not an elegant solution but here we go - // Basically we don't allow pipe() method to resume reading input - // and set internal _readableState.flowing to false - canResume = false; - stream.on('resume', () => { - if (!canResume) { - (stream as any)._readableState.flowing = false; // i guess we are unsafely accessing this - } - }); - // Unpipe initial request - src.unpipe(req); - req.end(); - }); - return req; - } - - /** - * Create readable stream for given path - * - * @fires Request#data - * @fires WebHDFS#finish - * - * @example - * let hdfs = WebHDFS.createClient(); - * - * let remoteFileStream = hdfs.createReadStream('/path/to/remote/file'); - * - * remoteFileStream.on('error', (err) => { - * // Do something with the error - * }); - * - * remoteFileStream.on('data', (dataChunk) => { - * // Do something with the data chunk - * }); - * - * remoteFileStream.on('finish', () => { - * // Upload is done - * }); - */ - public createReadStream(path: string, opts?: object): fs.ReadStream { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('open', path, opts); - let params: request.OptionsWithUrl = Object.assign( - { - method: 'GET', - url: endpoint, - json: true - }, - this._requestParams - ); - if (!this._requestParams.isKerberos) { - return this.doCreateReadStream(params); - } - // Else, must add kerberos token and handle redirects - params.followRedirect = false; - let replyStream = through(); - let handleErr = (err: any) => { - replyStream.emit('error', err); - replyStream.end(); - }; - let initRedirectedStream = () => { - let redirectedStream = this.doCreateReadStream(params); - redirectedStream.pipe(replyStream); - }; - this.requestWithRedirectAndAuth(params, initRedirectedStream, handleErr); - - return replyStream; - } - - private requestWithRedirectAndAuth(params: request.OptionsWithUrl, onRedirected: () => void, handleErr: (err: any) => void) { - this.requestWithKerberosSync(params, (err, response: request.Response) => { - if (err && err.statusCode === 307 && response.headers['location']) { - // It's a redirect - params.url = response.headers['location']; - this.setKerberosAuthOnParams(params) - .then(onRedirected) - .catch(handleErr); - } else { - handleErr(err); - } - }); - } - - private doCreateReadStream(params: request.OptionsWithUrl): fs.ReadStream { - - let req: request.Request = request(params); - req.on('complete', (response) => { - req.emit('finish'); - }); - req.on('response', (response) => { - // Handle remote exceptions - // Remove all data handlers and parse error data - if (this.isError(response)) { - req.removeAllListeners('data'); - req.on('data', (data) => { - req.emit('error', this.parseError(response, data.toString())); - req.end(); - }); - } - else if (this.isRedirect(response)) { - let download = 
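The write-stream plumbing above exists because WebHDFS CREATE/APPEND is a two-step protocol: the NameNode answers the initial request with a 307 and a Location header, and the payload is then sent to the DataNode URL from that header, which is why the code pauses the piped source until the redirect resolves. A hedged promise-based sketch of the same handshake with Node 18's `fetch` (error handling trimmed):

```typescript
async function createFile(createUrl: string, data: Buffer): Promise<void> {
    // Step 1: ask the NameNode where to write; do not auto-follow redirects.
    const first = await fetch(createUrl, { method: 'PUT', redirect: 'manual' });
    const location = first.headers.get('location');
    if (first.status !== 307 || !location) {
        throw new Error(`Expected redirect from NameNode, got ${first.status}`);
    }
    // Step 2: send the payload to the DataNode URL from the Location header.
    const second = await fetch(location, {
        method: 'PUT',
        headers: { 'content-type': 'application/octet-stream' },
        body: data,
    });
    if (second.status !== 201) {
        throw new Error(`Create failed with status ${second.status}`);
    }
}
```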
request(params); - download.on('complete', (response) => { - req.emit('finish'); - }); - // Proxy data to original data handler - // Not the nicest way but hey - download.on('data', (dataChunk) => { - req.emit('data', dataChunk); - }); - // Handle subrequest - download.on('response', (response) => { - if (this.isError(response)) { - download.removeAllListeners('data'); - download.on('data', (data) => { - req.emit('error', this.parseError(response, data.toString())); - req.end(); - }); - } - }); - } - // No need to interrupt the request - // data will be automatically sent to the data handler - }); - return req; - } - - /** - * Create symbolic link to the destination path - * - * @returns void - */ - public symlink(src: string, destination: string, createParent: boolean = false, callback: (error: HdfsError) => void): void { - this.checkArgDefined('src', src); - this.checkArgDefined('destination', destination); - - let endpoint = this.getOperationEndpoint('createsymlink', src, { - createParent: createParent, - destination: destination - }); - - this.sendRequest('PUT', endpoint, undefined, (error) => { - if (callback) { - callback(error); - } - }); - } - - /** - * Unlink path - * - * @returns void - */ - public unlink(path: string, recursive: boolean = false, callback: (error: HdfsError) => void): void { - this.checkArgDefined('path', path); - - let endpoint = this.getOperationEndpoint('delete', path, { recursive: recursive }); - this.sendRequest('DELETE', endpoint, undefined, (error) => { - if (callback) { - callback(error); - } - }); - } - - /** - * @alias WebHDFS.unlink - * @returns void - */ - public rmdir(path: string, recursive: boolean = false, callback: (error: HdfsError) => void): void { - this.unlink(path, recursive, callback); - } - - public static createClient(opts: IHdfsOptions): WebHDFS { - return new WebHDFS( - Object.assign( - { - host: 'localhost', - port: '50070', - path: '/webhdfs/v1' - }, - opts || {} - ), - opts.requestParams ?? 
{ } - ); - } -} - -export class HdfsError extends Error { - constructor( - errorMessage: string, - public statusCode?: number, - public statusMessage?: string, - public remoteExceptionMessage?: string, - public internalError?: any) { - super(errorMessage); - } -} diff --git a/extensions/mssql/src/iconProvider.ts b/extensions/mssql/src/iconProvider.ts index 94cba1c30a..805613a84b 100644 --- a/extensions/mssql/src/iconProvider.ts +++ b/extensions/mssql/src/iconProvider.ts @@ -7,7 +7,6 @@ import * as azdata from 'azdata'; import * as constants from './constants'; const cloudIcon = 'mssql:cloud'; -const clusterIcon = 'mssql:cluster'; export class MssqlIconProvider implements azdata.IconProvider { public readonly providerId: string = constants.sqlProviderName; @@ -17,8 +16,6 @@ export class MssqlIconProvider implements azdata.IconProvider { if (connection.providerName === 'MSSQL') { if (serverInfo.isCloud) { iconName = cloudIcon; - } else if (serverInfo.options['isBigDataCluster']) { - iconName = clusterIcon; } } return Promise.resolve(iconName); diff --git a/extensions/mssql/src/localizedConstants.ts b/extensions/mssql/src/localizedConstants.ts index f7bdd9b1fd..e620a7d282 100644 --- a/extensions/mssql/src/localizedConstants.ts +++ b/extensions/mssql/src/localizedConstants.ts @@ -6,59 +6,7 @@ import * as nls from 'vscode-nls'; const localize = nls.loadMessageBundle(); -// HDFS Constants ////////////////////////////////////////////////////////// -export const msgMissingNodeContext = localize('msgMissingNodeContext', "Node Command called without any node passed"); - -// HDFS Manage Access Dialog Constants //////////////////////////////////// - -export const manageAccessTitle = localize('mssql.manageAccessTitle', "Manage Access"); -export const locationTitle = localize('mssql.locationTitle', "Location : "); -export const permissionsHeader = localize('mssql.permissionsTitle', "Permissions"); -export const ownerPostfix = localize('mssql.ownerPostfix', " - Owner"); -export const owner = localize('mssql.owner', "Owner"); -export const group = localize('mssql.group', "Group"); -export const owningGroupPostfix = localize('mssql.owningGroupPostfix', " - Owning Group"); -export const everyoneName = localize('mssql.everyone', "Everyone else"); -export const userLabel = localize('mssql.userLabel', "User"); -export const groupLabel = localize('mssql.groupLabel', "Group"); -export const accessHeader = localize('mssql.accessHeader', "Access"); -export const defaultHeader = localize('mssql.defaultHeader', "Default"); -export const deleteTitle = localize('mssql.delete', "Delete"); -export const stickyLabel = localize('mssql.stickyHeader', "Sticky Bit"); -export const inheritDefaultsLabel = localize('mssql.inheritDefaultsLabel', "Inherit Defaults"); -export const readHeader = localize('mssql.readHeader', "Read"); -export const writeHeader = localize('mssql.writeHeader', "Write"); -export const executeHeader = localize('mssql.executeHeader', "Execute"); -export const addUserOrGroupHeader = localize('mssql.addUserOrGroup', "Add User or Group"); -export const enterNamePlaceholder = localize('mssql.enterNamePlaceholder', "Enter name"); -export const addLabel = localize('mssql.addLabel', "Add"); -export const namedUsersAndGroupsHeader = localize('mssql.namedUsersAndGroups', "Named Users and Groups"); -export const defaultUserAndGroups = localize('mssql.defaultUserAndGroups', "Default User and Groups"); -export const userOrGroupIcon = localize('mssql.userOrGroupIcon', "User or Group Icon"); -export const applyText = 
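Everything on `WebHDFS` is callback-based, with failures delivered as `HdfsError`. Where this removal pushes callers onto other APIs, a thin adapter like the following is the usual bridge to async/await; the `client.listStatus` call in the usage comment is illustrative:

```typescript
// Adapt a single-result callback API to a Promise.
function promisify<T>(
    call: (cb: (error: Error | undefined, result: T) => void) => void
): Promise<T> {
    return new Promise((resolve, reject) => {
        call((error, result) => error ? reject(error) : resolve(result));
    });
}

// Usage sketch:
// const files = await promisify<FileStatus[]>(cb => client.listStatus('/data', cb));
```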
localize('mssql.apply', "Apply"); -export const applyRecursivelyText = localize('mssql.applyRecursively', "Apply Recursively"); - -export function errorApplyingAclChanges(errMsg: string): string { return localize('mssql.errorApplyingAclChanges', "Unexpected error occurred while applying changes: {0}", errMsg); } - -// Spark Job Submission Constants ////////////////////////////////////////// -export const sparkLocalFileDestinationHint = localize('sparkJobSubmission.LocalFileDestinationHint', "Local file will be uploaded to HDFS. "); -export const sparkJobSubmissionEndMessage = localize('sparkJobSubmission.SubmissionEndMessage', ".......................... Submit Spark Job End ............................"); -export function sparkJobSubmissionPrepareUploadingFile(localPath: string, clusterFolder: string): string { return localize('sparkJobSubmission.PrepareUploadingFile', "Uploading file from local {0} to HDFS folder: {1}", localPath, clusterFolder); } -export const sparkJobSubmissionUploadingFileSucceeded = localize('sparkJobSubmission.UploadingFileSucceeded', "Upload file to cluster Succeeded!"); -export function sparkJobSubmissionUploadingFileFailed(err: string): string { return localize('sparkJobSubmission.UploadingFileFailed', "Upload file to cluster Failed. {0}", err); } -export function sparkJobSubmissionPrepareSubmitJob(jobName: string): string { return localize('sparkJobSubmission.PrepareSubmitJob', "Submitting job {0} ... ", jobName); } -export const sparkJobSubmissionSparkJobHasBeenSubmitted = localize('sparkJobSubmission.SubmitJobFinished', "The Spark Job has been submitted."); -export function sparkJobSubmissionSubmitJobFailed(err: string): string { return localize('sparkJobSubmission.SubmitJobFailed', "Spark Job Submission Failed. {0} ", err); } -export function sparkJobSubmissionYarnUIMessage(yarnUIURL: string): string { return localize('sparkJobSubmission.YarnUIMessage', "YarnUI Url: {0} ", yarnUIURL); } -export function sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryLink: string): string { return localize('sparkJobSubmission.SparkHistoryLinkMessage', "Spark History Url: {0} ", sparkHistoryLink); } -export function sparkJobSubmissionGetApplicationIdFailed(err: string): string { return localize('sparkJobSubmission.GetApplicationIdFailed', "Get Application Id Failed. {0}", err); } -export function sparkJobSubmissionLocalFileNotExisted(path: string): string { return localize('sparkJobSubmission.LocalFileNotExisted', "Local file {0} does not exist. 
", path); } -export const sparkJobSubmissionNoSqlBigDataClusterFound = localize('sparkJobSubmission.NoSqlBigDataClusterFound', "No SQL Server Big Data Cluster found."); -export function sparkConnectionRequired(name: string): string { return localize('sparkConnectionRequired', "Please connect to the Spark cluster before View {0} History.", name); } - - export function failedToFindTenants(tenantId: string, accountName: string): string { return localize('mssql.failedToFindTenants', "Failed to find tenant '{0}' in account '{1}' when refreshing security token", tenantId, accountName); } export function tokenRefreshFailed(name: string): string { return localize('mssql.tokenRefreshFailed', "{0} AAD token refresh failed, please reconnect to enable {0}", name); } export const tokenRefreshFailedNoSecurityToken = localize('mssql.tokenRefreshFailedNoSecurityToken', "Editor token refresh failed, autocompletion will be disabled until the editor is disconnected and reconnected"); export function failedToFindAccount(accountName: string) { return localize('mssql.failedToFindAccount', "Failed to find azure account {0} when executing token refresh", accountName); } - diff --git a/extensions/mssql/src/main.ts b/extensions/mssql/src/main.ts index c01c6d812f..fae44fe5ab 100644 --- a/extensions/mssql/src/main.ts +++ b/extensions/mssql/src/main.ts @@ -6,26 +6,16 @@ import * as vscode from 'vscode'; import * as azdata from 'azdata'; import * as path from 'path'; -import * as os from 'os'; import * as Constants from './constants'; import ContextProvider from './contextProvider'; import * as Utils from './utils'; import { AppContext } from './appContext'; -import { UploadFilesCommand, MkDirCommand, SaveFileCommand, PreviewFileCommand, CopyPathCommand, DeleteFilesCommand, ManageAccessCommand } from './objectExplorerNodeProvider/hdfsCommands'; -import { IPrompter } from './prompts/question'; -import CodeAdapter from './prompts/adapter'; import { IExtension } from 'mssql'; -import { OpenSparkJobSubmissionDialogCommand, OpenSparkJobSubmissionDialogFromFileCommand, OpenSparkJobSubmissionDialogTask } from './sparkFeature/dialog/dialogCommands'; -import { OpenSparkYarnHistoryTask } from './sparkFeature/historyTask'; -import { MssqlObjectExplorerNodeProvider, mssqlOutputChannel } from './objectExplorerNodeProvider/objectExplorerNodeProvider'; -import { registerSearchServerCommand } from './objectExplorerNodeProvider/command'; import { MssqlIconProvider } from './iconProvider'; -import { registerServiceEndpoints, Endpoint } from './dashboard/serviceEndpoints'; import { getBookExtensionContributions } from './dashboard/bookExtensions'; import { registerBooksWidget } from './dashboard/bookWidget'; import { createMssqlApi } from './mssqlApiFactory'; -import { AuthType } from './util/auth'; import { SqlToolsServer } from './sqlToolsServer'; import { promises as fs } from 'fs'; import { IconPathHelper } from './iconHelper'; @@ -34,7 +24,6 @@ import { INotebookConvertService } from './notebookConvert/notebookConvertServic import { registerTableDesignerCommands } from './tableDesigner/tableDesigner'; const localize = nls.loadMessageBundle(); -const msgSampleCodeDataFrame = localize('msgSampleCodeDataFrame', "This sample code loads the file into a data frame and shows the first 10 results."); export async function activate(context: vscode.ExtensionContext): Promise { // lets make sure we support this platform first @@ -52,24 +41,16 @@ export async function activate(context: vscode.ExtensionContext): Promise { - await new 
OpenSparkJobSubmissionDialogTask(appContext, outputChannel).execute(profile); +function registerSearchServerCommand(): void { + vscode.commands.registerCommand('mssql.searchServers', () => { + void vscode.window.showInputBox({ + placeHolder: localize('mssql.searchServers', "Search Server Names") + }).then((stringSearch) => { + if (stringSearch) { + void vscode.commands.executeCommand('registeredServers.searchServer', (stringSearch)); + } + }); }); - azdata.tasks.registerTask(Constants.mssqlClusterLivyOpenSparkHistory, async (profile: azdata.IConnectionProfile) => { - await new OpenSparkYarnHistoryTask(appContext).execute(profile, true); + vscode.commands.registerCommand('mssql.clearSearchServerResult', () => { + void vscode.commands.executeCommand('registeredServers.clearSearchServerResult'); }); - azdata.tasks.registerTask(Constants.mssqlClusterLivyOpenYarnHistory, async (profile: azdata.IConnectionProfile) => { - await new OpenSparkYarnHistoryTask(appContext).execute(profile, false); - }); -} - -function activateNotebookTask(appContext: AppContext): void { - azdata.tasks.registerTask(Constants.mssqlClusterNewNotebookTask, (profile: azdata.IConnectionProfile) => { - return saveProfileAndCreateNotebook(profile); - }); - azdata.tasks.registerTask(Constants.mssqlClusterOpenNotebookTask, (profile: azdata.IConnectionProfile) => { - return handleOpenNotebookTask(profile); - }); - azdata.tasks.registerTask(Constants.mssqlOpenClusterDashboard, (profile: azdata.IConnectionProfile) => { - return handleOpenClusterDashboardTask(profile, appContext); - }); -} - -function saveProfileAndCreateNotebook(profile: azdata.IConnectionProfile): Promise { - return handleNewNotebookTask(undefined, profile); } function findNextUntitledEditorName(): string { @@ -178,74 +132,6 @@ function findNextUntitledEditorName(): string { } } -async function handleNewNotebookTask(oeContext?: azdata.ObjectExplorerContext, profile?: azdata.IConnectionProfile): Promise { - // Ensure we get a unique ID for the notebook. For now we're using a different prefix to the built-in untitled files - // to handle this. 
We should look into improving this in the future - let title = findNextUntitledEditorName(); - let untitledUri = vscode.Uri.parse(`untitled:${title}`); - let editor = await azdata.nb.showNotebookDocument(untitledUri, { - connectionProfile: profile, - preview: false - }); - if (oeContext && oeContext.nodeInfo && oeContext.nodeInfo.nodePath) { - // Get the file path after '/HDFS' - let hdfsPath: string = oeContext.nodeInfo.nodePath.substring(oeContext.nodeInfo.nodePath.indexOf('/HDFS') + '/HDFS'.length); - if (hdfsPath.length > 0) { - let analyzeCommand = '#' + msgSampleCodeDataFrame + os.EOL + 'df = (spark.read.option("inferSchema", "true")' - + os.EOL + '.option("header", "true")' + os.EOL + '.csv("{0}"))' + os.EOL + 'df.show(10)'; - await editor.edit(editBuilder => { - editBuilder.replace(0, { - cell_type: 'code', - source: analyzeCommand.replace('{0}', hdfsPath) - }); - }); - - } - } -} - -async function handleOpenNotebookTask(profile: azdata.IConnectionProfile): Promise { - let notebookFileTypeName = localize('notebookFileType', "Notebooks"); - let filter: { [key: string]: string[] } = {}; - filter[notebookFileTypeName] = ['ipynb']; - let uris = await vscode.window.showOpenDialog({ - filters: filter, - canSelectFiles: true, - canSelectMany: false - }); - if (uris && uris.length > 0) { - let fileUri = uris[0]; - // Verify this is a .ipynb file since this isn't actually filtered on Mac/Linux - if (path.extname(fileUri.fsPath) !== '.ipynb') { - // in the future might want additional supported types - void vscode.window.showErrorMessage(localize('unsupportedFileType', "Only .ipynb Notebooks are supported")); - } else { - await azdata.nb.showNotebookDocument(fileUri, { - connectionProfile: profile, - preview: false - }); - } - } -} - -async function handleOpenClusterDashboardTask(profile: azdata.IConnectionProfile, appContext: AppContext): Promise { - const serverInfo = await azdata.connection.getServerInfo(profile.id); - const controller = Utils.getClusterEndpoints(serverInfo).find(e => e.name === Endpoint.controller); - if (!controller) { - void vscode.window.showErrorMessage(localize('noController', "Could not find the controller endpoint for this instance")); - return; - } - - void vscode.commands.executeCommand('bigDataClusters.command.manageController', - { - url: controller.endpoint, - auth: profile.authenticationType === 'Integrated' ? 
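`findNextUntitledEditorName` (which stays in main.ts) and the removed notebook tasks both rely on probing untitled titles until one is free. A sketch of that loop; `isTitleInUse` is a caller-supplied predicate standing in for the check against open editor documents:

```typescript
// Probe Notebook-1, Notebook-2, ... until no open document claims the title.
function nextUntitledName(
    isTitleInUse: (title: string) => boolean,
    prefix = 'Notebook'
): string {
    for (let i = 1; ; i++) {
        const candidate = `${prefix}-${i}`;
        if (!isTitleInUse(candidate)) {
            return candidate;
        }
    }
}
```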
AuthType.Integrated : AuthType.Basic, - username: 'admin', // Default to admin as a best-guess, we'll prompt for re-entering credentials if that fails - password: profile.password, - rememberPassword: true - }, /*addOrUpdateController*/true); -} - // this method is called when your extension is deactivated export function deactivate(): void { } diff --git a/extensions/mssql/src/mssql.d.ts b/extensions/mssql/src/mssql.d.ts index 0728a2f075..4d08e9526e 100644 --- a/extensions/mssql/src/mssql.d.ts +++ b/extensions/mssql/src/mssql.d.ts @@ -27,11 +27,6 @@ declare module 'mssql' { * Path to the root of the SQL Tools Service folder */ readonly sqlToolsServicePath: string; - /** - * Gets the object explorer API that supports querying over the connections supported by this extension - * - */ - getMssqlObjectExplorerBrowser(): MssqlObjectExplorerBrowser; /** * Get the Cms Service APIs to communicate with CMS connections supported by this extension diff --git a/extensions/mssql/src/mssqlApiFactory.ts b/extensions/mssql/src/mssqlApiFactory.ts index 43c929e3d6..f2038ebb68 100644 --- a/extensions/mssql/src/mssqlApiFactory.ts +++ b/extensions/mssql/src/mssqlApiFactory.ts @@ -4,10 +4,8 @@ *--------------------------------------------------------------------------------------------*/ import { AppContext } from './appContext'; -import { IExtension, ICmsService, IDacFxService, ISchemaCompareService, MssqlObjectExplorerBrowser, ILanguageExtensionService, ISqlAssessmentService, ISqlMigrationService, IAzureBlobService } from 'mssql'; +import { IExtension, ICmsService, IDacFxService, ISchemaCompareService, ILanguageExtensionService, ISqlAssessmentService, ISqlMigrationService, IAzureBlobService } from 'mssql'; import * as constants from './constants'; -import { MssqlObjectExplorerNodeProvider } from './objectExplorerNodeProvider/objectExplorerNodeProvider'; -import * as azdata from 'azdata'; import { SqlToolsServer } from './sqlToolsServer'; export function createMssqlApi(context: AppContext, sqlToolsServer: SqlToolsServer): IExtension { @@ -27,14 +25,6 @@ export function createMssqlApi(context: AppContext, sqlToolsServer: SqlToolsServ get languageExtension() { return context.getService(constants.LanguageExtensionService); }, - getMssqlObjectExplorerBrowser(): MssqlObjectExplorerBrowser { - return { - getNode: (explorerContext: azdata.ObjectExplorerContext) => { - let oeProvider = context.getService(constants.ObjectExplorerService); - return oeProvider.findSqlClusterNodeByContext(explorerContext); - } - }; - }, get sqlAssessment() { return context.getService(constants.SqlAssessmentService); }, diff --git a/extensions/mssql/src/objectExplorerNodeProvider/cancelableStream.ts b/extensions/mssql/src/objectExplorerNodeProvider/cancelableStream.ts deleted file mode 100644 index a2499389ed..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/cancelableStream.ts +++ /dev/null @@ -1,25 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -import { Transform } from 'stream'; -import * as vscode from 'vscode'; -import * as nls from 'vscode-nls'; - -const localize = nls.loadMessageBundle(); - -export class CancelableStream extends Transform { - constructor(private cancelationToken: vscode.CancellationTokenSource) { - super(); - } - - public override _transform(chunk: any, encoding: string, callback: Function): void { - if (this.cancelationToken && this.cancelationToken.token.isCancellationRequested) { - callback(new Error(localize('streamCanceled', "Stream operation canceled by the user"))); - } else { - this.push(chunk); - callback(); - } - } -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/command.ts b/extensions/mssql/src/objectExplorerNodeProvider/command.ts deleted file mode 100644 index e36f1cc486..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/command.ts +++ /dev/null @@ -1,187 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import { TreeNode } from './treeNodes'; -import { QuestionTypes, IPrompter, IQuestion } from '../prompts/question'; -import * as utils from '../utils'; -import * as constants from '../constants'; -import { AppContext } from '../appContext'; - -interface ICommandContextParsingOptions { - editor: boolean; - uri: boolean; -} - -interface ICommandBaseContext { - command: string; - editor?: vscode.TextEditor; - uri?: vscode.Uri; -} - -export interface ICommandUnknownContext extends ICommandBaseContext { - type: 'unknown'; -} - -interface ICommandUriContext extends ICommandBaseContext { - type: 'uri'; -} - -export interface ICommandViewContext extends ICommandBaseContext { - type: 'view'; - node: TreeNode; -} - -export interface ICommandObjectExplorerContext extends ICommandBaseContext { - type: 'objectexplorer'; - explorerContext: azdata.ObjectExplorerContext; -} - -type CommandContext = ICommandObjectExplorerContext | ICommandViewContext | ICommandUriContext | ICommandUnknownContext; - -function isTextEditor(editor: any): editor is vscode.TextEditor { - if (editor === undefined) { return false; } - - return editor.id !== undefined && ((editor as vscode.TextEditor).edit !== undefined || (editor as vscode.TextEditor).document !== undefined); -} - -export abstract class Command extends vscode.Disposable { - - - protected readonly contextParsingOptions: ICommandContextParsingOptions = { editor: false, uri: false }; - - private disposable: vscode.Disposable; - - constructor(command: string | string[], protected appContext: AppContext) { - super(() => this.dispose()); - - if (typeof command === 'string') { - this.disposable = vscode.commands.registerCommand(command, (...args: any[]) => this._execute(command, ...args), this); - - return; - } - - const subscriptions = command.map(cmd => vscode.commands.registerCommand(cmd, (...args: any[]) => this._execute(cmd, ...args), this)); - this.disposable = vscode.Disposable.from(...subscriptions); - } - - override dispose(): void { - if (this.disposable) { - 
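The deleted `CancelableStream` is a Transform that checks a cancellation token on every chunk and fails the pipeline once cancellation is requested. An equivalent that runs outside VS Code, assuming an AbortSignal in place of the vscode CancellationTokenSource:

```typescript
import { Transform, TransformCallback } from 'stream';

class CancelableStream extends Transform {
    constructor(private readonly signal: AbortSignal) {
        super();
    }

    // Fail the pipeline with an error once cancellation is requested;
    // otherwise pass the chunk through unchanged.
    override _transform(chunk: any, _encoding: BufferEncoding, callback: TransformCallback): void {
        if (this.signal.aborted) {
            callback(new Error('Stream operation canceled by the user'));
        } else {
            this.push(chunk);
            callback();
        }
    }
}

// Usage sketch:
// source.pipe(new CancelableStream(controller.signal)).pipe(destination);
```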
this.disposable.dispose(); - } - } - - protected async preExecute(...args: any[]): Promise<any> { - return this.execute(...args); - } - - abstract execute(...args: any[]): any; - - protected _execute(command: string, ...args: any[]): any { - // TODO consider using Telemetry.trackEvent(command); - - const [context, rest] = Command.parseContext(command, this.contextParsingOptions, ...args); - return this.preExecute(context, ...rest); - } - - private static parseContext(command: string, options: ICommandContextParsingOptions, ...args: any[]): [CommandContext, any[]] { - let editor: vscode.TextEditor | undefined = undefined; - - let firstArg = args[0]; - if (options.editor && (firstArg === undefined || isTextEditor(firstArg))) { - editor = firstArg; - args = args.slice(1); - firstArg = args[0]; - } - - if (options.uri && (firstArg === undefined || firstArg instanceof vscode.Uri)) { - const [uri, ...rest] = args as [vscode.Uri, any]; - return [{ command: command, type: 'uri', editor: editor, uri: uri }, rest]; - } - - if (firstArg instanceof TreeNode) { - const [node, ...rest] = args as [TreeNode, any]; - return [{ command: command, type: constants.ViewType, node: node }, rest]; - } - - if (firstArg && utils.isObjectExplorerContext(firstArg)) { - const [explorerContext, ...rest] = args as [azdata.ObjectExplorerContext, any]; - return [{ command: command, type: constants.ObjectExplorerService, explorerContext: explorerContext }, rest]; - } - - return [{ command: command, type: 'unknown', editor: editor }, args]; - } -} - -export abstract class ProgressCommand extends Command { - static progressId = 0; - constructor(command: string, protected prompter: IPrompter, appContext: AppContext) { - super(command, appContext); - } - - protected async executeWithProgress( - execution: (cancelToken: vscode.CancellationTokenSource) => Promise<void>, - label: string, - isCancelable: boolean = false, - onCanceled?: () => void - ): Promise<void> { - let disposables: vscode.Disposable[] = []; - const tokenSource = new vscode.CancellationTokenSource(); - const statusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left); - disposables.push(vscode.Disposable.from(statusBarItem)); - statusBarItem.text = localize('progress', "$(sync~spin) {0}...", label); - if (isCancelable) { - const cancelCommandId = `cancelProgress${ProgressCommand.progressId++}`; - disposables.push(vscode.commands.registerCommand(cancelCommandId, async () => { - if (await this.confirmCancel()) { - tokenSource.cancel(); - } - })); - statusBarItem.tooltip = localize('cancelTooltip', "Cancel"); - statusBarItem.command = cancelCommandId; - } - statusBarItem.show(); - - try { - await execution(tokenSource); - } catch (error) { - if (isCancelable && onCanceled && tokenSource.token.isCancellationRequested) { - // The error can be assumed to be due to cancellation occurring.
Do the callback - onCanceled(); - } else { - throw error; - } - } finally { - disposables.forEach(d => d.dispose()); - } - } - - private async confirmCancel(): Promise { - return await this.prompter.promptSingle({ - type: QuestionTypes.confirm, - message: localize('cancel', "Cancel operation?"), - default: true - }); - } -} - -export function registerSearchServerCommand(appContext: AppContext): void { - vscode.commands.registerCommand('mssql.searchServers', () => { - void vscode.window.showInputBox({ - placeHolder: localize('mssql.searchServers', "Search Server Names") - }).then((stringSearch) => { - if (stringSearch) { - void vscode.commands.executeCommand('registeredServers.searchServer', (stringSearch)); - } - }); - }); - vscode.commands.registerCommand('mssql.clearSearchServerResult', () => { - void vscode.commands.executeCommand('registeredServers.clearSearchServerResult'); - }); -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/connection.ts b/extensions/mssql/src/objectExplorerNodeProvider/connection.ts deleted file mode 100644 index 52e04592b7..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/connection.ts +++ /dev/null @@ -1,123 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import * as constants from '../constants'; -import { getIgnoreSslVerificationConfigSetting } from '../util/auth'; -import { IFileSource, IHdfsOptions, FileSourceFactory } from './fileSources'; - -export class SqlClusterConnection { - private _connection: azdata.connection.Connection; - private _profile: azdata.IConnectionProfile; - private _host: string; - private _port: string; - private _user: string; - private _password: string; - - constructor(connectionInfo: azdata.connection.Connection | azdata.IConnectionProfile) { - this.validate(connectionInfo); - - if ('id' in connectionInfo) { - this._profile = connectionInfo; - this._connection = this.toConnection(this._profile); - } else { - this._connection = connectionInfo; - } - this._host = this._connection.options[constants.hostPropName]; - this._port = this._connection.options[constants.knoxPortPropName]; - this._user = this._connection.options[constants.userPropName]; - this._password = this._connection.options[constants.passwordPropName]; - } - - public get connection(): azdata.connection.Connection { return this._connection; } - public get host(): string { return this._host; } - public get port(): number { return this._port ? Number.parseInt(this._port) : constants.defaultKnoxPort; } - public get user(): string { return this._user; } - public get password(): string { return this._password; } - - public isMatch(connection: SqlClusterConnection | azdata.ConnectionInfo): boolean { - if (!connection) { return false; } - let options1 = connection instanceof SqlClusterConnection ? 
- connection._connection.options : connection.options; - let options2 = this._connection.options; - return [constants.hostPropName, constants.knoxPortPropName, constants.userPropName] - .every(e => options1[e] === options2[e]); - } - - public async createHdfsFileSource(): Promise { - let options: IHdfsOptions = { - protocol: 'https', - host: this.host, - port: this.port, - user: this.user, - path: 'gateway/default/webhdfs/v1', - requestParams: { - rejectUnauthorized: !getIgnoreSslVerificationConfigSetting() - } - }; - if (this.isIntegratedAuth()) { - options.requestParams.isKerberos = this.isIntegratedAuth(); - options.requestParams.auth = undefined; - } else { - options.requestParams.auth = { - user: this.user, - pass: this.password - }; - } - let fileSource = await FileSourceFactory.instance.createHdfsFileSource(options); - return fileSource; - } - - public isIntegratedAuth(): boolean { - let authType: string = this._connection.options[constants.authenticationTypePropName]; - return authType && authType.toLowerCase() === constants.integratedAuth; - } - - public updateUsername(username: string): void { - if (username) { - this._user = username; - } - } - - public updatePassword(password: string): void { - if (password) { - this._password = password; - } - } - - private validate(connectionInfo: azdata.ConnectionInfo): void { - if (!connectionInfo) { - throw new Error(localize('connectionInfoUndefined', "ConnectionInfo is undefined.")); - } - if (!connectionInfo.options) { - throw new Error(localize('connectionInfoOptionsUndefined', "ConnectionInfo.options is undefined.")); - } - let missingProperties: string[] = this.getMissingProperties(connectionInfo); - if (missingProperties && missingProperties.length > 0) { - throw new Error(localize('connectionInfoOptionsMissingProperties', - "Some missing properties in connectionInfo.options: {0}", - missingProperties.join(', '))); - } - } - - private getMissingProperties(connectionInfo: azdata.ConnectionInfo): string[] { - if (!connectionInfo || !connectionInfo.options) { return undefined; } - let requiredProps = [constants.hostPropName, constants.knoxPortPropName]; - let authType = connectionInfo.options[constants.authenticationTypePropName] && connectionInfo.options[constants.authenticationTypePropName].toLowerCase(); - if (authType !== constants.integratedAuth) { - requiredProps.push(constants.userPropName, constants.passwordPropName); - } - return requiredProps.filter(e => connectionInfo.options[e] === undefined); - } - - private toConnection(connProfile: azdata.IConnectionProfile): azdata.connection.Connection { - let connection: azdata.connection.Connection = Object.assign(connProfile, - { connectionId: this._profile.id }); - return connection; - } -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/fileSources.ts b/extensions/mssql/src/objectExplorerNodeProvider/fileSources.ts deleted file mode 100644 index 741357ec66..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/fileSources.ts +++ /dev/null @@ -1,428 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
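// Illustrative sketch, not part of the change above: the shape of the WebHDFS options the
// deleted SqlClusterConnection.createHdfsFileSource builds. Values are hypothetical; the
// Knox gateway path and the basic-vs-Kerberos auth switch mirror the code above.
function buildHdfsOptions(host: string, port: number, user: string, pass: string, integratedAuth: boolean) {
	return {
		protocol: 'https',
		host, port, user,
		path: 'gateway/default/webhdfs/v1', // Knox gateway route to WebHDFS
		requestParams: {
			rejectUnauthorized: false, // would honor the ignore-SSL-verification setting
			isKerberos: integratedAuth || undefined,
			auth: integratedAuth ? undefined : { user, pass }
		}
	};
}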
- *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as fspath from 'path'; -import * as fs from 'fs'; -import * as meter from 'stream-meter'; -import * as bytes from 'bytes'; -import * as https from 'https'; -import * as readline from 'readline'; -import * as os from 'os'; -import * as nls from 'vscode-nls'; - -import * as constants from '../constants'; -import { WebHDFS, HdfsError } from '../hdfs/webhdfs'; -import { PermissionStatus } from '../hdfs/aclEntry'; -import { Mount, MountStatus } from '../hdfs/mount'; -import { FileStatus, hdfsFileTypeToFileType } from '../hdfs/fileStatus'; - -const localize = nls.loadMessageBundle(); - -export function joinHdfsPath(parent: string, child: string): string { - if (parent === constants.hdfsRootPath) { - return `/${child}`; - } - return `${parent}/${child}`; -} - -export const enum FileType { - Directory = 'Directory', - File = 'File', - Symlink = 'Symlink' -} - -export interface IFile { - path: string; - fileType: FileType; - mountStatus?: MountStatus; -} - -export class File implements IFile { - public mountStatus?: MountStatus; - constructor(public path: string, public fileType: FileType) { - - } - - public static createPath(path: string, fileName: string): string { - return joinHdfsPath(path, fileName); - } - - public static createChild(parent: IFile, fileName: string, fileType: FileType): IFile { - return new File(File.createPath(parent.path, fileName), fileType); - } - - public static createFile(parent: IFile, fileName: string): File { - return <File>File.createChild(parent, fileName, FileType.File); - } - - public static createDirectory(parent: IFile, fileName: string): IFile { - return File.createChild(parent, fileName, FileType.Directory); - } - - public static getBasename(file: IFile): string { - return fspath.basename(file.path); - } -} - -export interface IFileSource { - enumerateFiles(path: string, refresh?: boolean): Promise<IFile[]>; - mkdir(dirName: string, remoteBasePath: string): Promise<void>; - createReadStream(path: string): fs.ReadStream; - readFile(path: string, maxBytes?: number): Promise<Buffer>; - readFileLines(path: string, maxLines: number): Promise<Buffer>; - writeFile(localFile: IFile, remoteDir: string): Promise<string>; - delete(path: string, recursive?: boolean): Promise<void>; - /** - * Retrieves the file status for the specified path (may be a file or directory) - */ - getFileStatus(path: string): Promise<FileStatus>; - /** - * Get ACL status for given path - * @param path The path to the file/folder to get the status of - */ - getAclStatus(path: string): Promise<PermissionStatus>; - /** - * Sets the ACL status for given path - * @param path The path to the file/folder to set the ACL on - * @param fileType The type of file we're setting to determine if defaults should be applied.
Use undefined if type is unknown - * @param permissionStatus The status containing the permissions to set - */ - setAcl(path: string, fileType: FileType | undefined, permissionStatus: PermissionStatus): Promise<void>; - /** - * Removes the default ACLs for the specified path - * @param path The path to remove the default ACLs for - */ - removeDefaultAcl(path: string): Promise<void>; - /** - * Sets the permission octal (sticky, owner, group & other) for a file/folder - * @param path The path to the file/folder to set the permission of - * @param aclStatus The status containing the permission to set - */ - setPermission(path: string, aclStatus: PermissionStatus): Promise<void>; - exists(path: string): Promise<boolean>; -} - -interface IHttpAuthentication { - user: string; - pass: string; -} - -export interface IHdfsOptions { - host?: string; - port?: number; - protocol?: string; - user?: string; - path?: string; - requestParams?: IRequestParams; -} - -export interface IRequestParams { - auth?: IHttpAuthentication; - isKerberos?: boolean; - /** - * Timeout in milliseconds to wait for response - */ - timeout?: number; - agent?: https.Agent; - headers?: {}; - rejectUnauthorized?: boolean; -} - -export class FileSourceFactory { - private static _instance: FileSourceFactory; - - public static get instance(): FileSourceFactory { - if (!FileSourceFactory._instance) { - FileSourceFactory._instance = new FileSourceFactory(); - } - return FileSourceFactory._instance; - } - - public async createHdfsFileSource(options: IHdfsOptions): Promise<IFileSource> { - options = options && options.host ? FileSourceFactory.removePortFromHost(options) : options; - return new HdfsFileSource(WebHDFS.createClient(options)); - } - - // remove port from host when port is specified after a comma or colon - private static removePortFromHost(options: IHdfsOptions): IHdfsOptions { - // determine whether the host has either a ',' or ':' in it - options = this.setHostAndPort(options, ','); - options = this.setHostAndPort(options, ':'); - return options; - } - - // set port and host correctly after we've identified that a delimiter exists in the host name - private static setHostAndPort(options: IHdfsOptions, delimiter: string): IHdfsOptions { - let optionsHost: string = options.host; - if (options.host.indexOf(delimiter) > -1) { - options.host = options.host.slice(0, options.host.indexOf(delimiter)); - options.port = Number.parseInt(optionsHost.replace(options.host + delimiter, '')); - } - return options; - } -} - -class HdfsFileSource implements IFileSource { - private mounts: Map<string, Mount>; - constructor(private client: WebHDFS) { - } - - public async enumerateFiles(path: string, refresh?: boolean): Promise<IFile[]> { - if (!this.mounts || refresh) { - await this.loadMounts(); - } - return this.listStatus(path); - } - - private loadMounts(): Promise<void> { - return new Promise((resolve, reject) => { - this.client.getMounts((error, mounts) => { - this.mounts = new Map(); - if (!error && mounts) { - mounts.forEach(m => this.mounts.set(m.mountPath, m)); - } - resolve(); - }); - }); - } - - private listStatus(path: string): Promise<IFile[]> { - return new Promise((resolve, reject) => { - this.client.listStatus(path, (error, fileStatuses) => { - if (error) { - reject(error); - } - else { - let hdfsFiles: IFile[] = fileStatuses.map(fileStatus => { - let file = new File(File.createPath(path, fileStatus.pathSuffix), hdfsFileTypeToFileType(fileStatus.type)); - if (this.mounts && this.mounts.has(file.path)) { - file.mountStatus = MountStatus.Mount; - } - return file; - }); - resolve(hdfsFiles); - } - }); -
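// Illustrative sketch, not part of the change above: what the delimiter handling in
// FileSourceFactory does for the two supported separators. Example inputs are hypothetical:
//   'myhost,30443' -> host 'myhost', port 30443
//   'myhost:30443' -> host 'myhost', port 30443
function splitHostAndPort(host: string, delimiter: string): { host: string; port?: number } {
	const idx = host.indexOf(delimiter);
	if (idx === -1) { return { host }; }
	return { host: host.slice(0, idx), port: Number.parseInt(host.slice(idx + delimiter.length), 10) };
}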
}); - } - - public mkdir(dirName: string, remoteBasePath: string): Promise { - return new Promise((resolve, reject) => { - let remotePath = joinHdfsPath(remoteBasePath, dirName); - this.client.mkdir(remotePath, undefined, (err) => { - if (err) { - reject(err); - } else { - resolve(undefined); - } - }); - }); - } - - public createReadStream(path: string): fs.ReadStream { - return this.client.createReadStream(path); - } - - public readFile(path: string, maxBytes?: number): Promise { - return new Promise((resolve, reject) => { - let error: HdfsError = undefined; - let remoteFileStream: fs.ReadStream | meter.StreamMeter = this.client.createReadStream(path); - remoteFileStream.on('error', (err) => { - error = err; - reject(error); - }); - - let data: any[] = []; - if (maxBytes) { - remoteFileStream = remoteFileStream.pipe(meter(maxBytes)); - remoteFileStream.on('error', (err) => { - error = err; - if (error.message.includes('Stream exceeded specified max')) { - // We have data > maxbytes, show we're truncating - let previewNote: string = '#################################################################################################################### \r\n' + - '########################### ' + localize('maxSizeNotice', "NOTICE: This file has been truncated at {0} for preview. ", bytes(maxBytes)) + '############################### \r\n' + - '#################################################################################################################### \r\n'; - data.splice(0, 0, Buffer.from(previewNote, 'utf-8')); - void vscode.window.showWarningMessage(localize('maxSizeReached', "The file has been truncated at {0} for preview.", bytes(maxBytes))); - resolve(Buffer.concat(data)); - } else { - reject(error); - } - }); - } - - remoteFileStream.on('data', (chunk) => { - data.push(chunk); - }); - - remoteFileStream.once('finish', () => { - if (!error) { - resolve(Buffer.concat(data)); - } - }); - }); - } - - public readFileLines(path: string, maxLines: number): Promise { - return new Promise((resolve, reject) => { - let lineReader = readline.createInterface({ - input: this.client.createReadStream(path) - }); - - let lineCount = 0; - let lineData: string[] = []; - let error: HdfsError = undefined; - lineReader.on('line', (line: string) => { - lineCount++; - lineData.push(line); - if (lineCount >= maxLines) { - resolve(Buffer.from(lineData.join(os.EOL))); - lineReader.close(); - } - }) - .on('error', (err) => { - error = err; - reject(error); - }) - .on('close', () => { - if (!error) { - resolve(Buffer.from(lineData.join(os.EOL))); - } - }); - }); - } - - public writeFile(localFile: IFile, remoteDirPath: string): Promise { - return new Promise((resolve, reject) => { - let fileName = fspath.basename(localFile.path); - let remotePath = joinHdfsPath(remoteDirPath, fileName); - - let error: HdfsError = undefined; - let writeStream = this.client.createWriteStream(remotePath); - // API always calls finish, so catch error then handle exit in the finish event - writeStream.on('error', (err) => { - error = err; - reject(error); - }); - writeStream.on('finish', (location: string) => { - if (!error) { - resolve(location); - } - }); - - let readStream = fs.createReadStream(localFile.path); - readStream.on('error', (err) => { - error = err; - reject(error); - }); - - readStream.pipe(writeStream); - }); - } - - public delete(path: string, recursive: boolean = false): Promise { - return new Promise((resolve, reject) => { - this.client.rmdir(path, recursive, (error) => { - if (error) { - reject(error); - } else 
{ - resolve(undefined); - } - }); - }); - } - - public exists(path: string): Promise { - return new Promise((resolve, reject) => { - this.client.exists(path, (error, exists) => { - if (error) { - reject(error); - } else { - resolve(exists); - } - }); - }); - } - - public getFileStatus(path: string): Promise { - return new Promise((resolve, reject) => { - this.client.getFileStatus(path, (error: HdfsError, fileStatus: FileStatus) => { - if (error) { - reject(error); - } else { - resolve(fileStatus); - } - }); - }); - } - - /** - * Get ACL status for given path - * @param path The path to the file/folder to get the status of - */ - public getAclStatus(path: string): Promise { - return new Promise((resolve, reject) => { - this.client.getAclStatus(path, (error: HdfsError, permissionStatus: PermissionStatus) => { - if (error) { - reject(error); - } else { - resolve(permissionStatus); - } - }); - }); - } - - /** - * Sets the ACL status for given path - * @param path The path to the file/folder to set the ACL on - * @param fileType The type of file we're setting to determine if defaults should be applied. Use undefined if type is unknown - * @param permissionStatus The permissions to set - */ - public setAcl(path: string, fileType: FileType | undefined, permissionStatus: PermissionStatus): Promise { - return new Promise((resolve, reject) => { - this.client.setAcl(path, fileType, permissionStatus, (error: HdfsError) => { - if (error) { - reject(error); - } else { - resolve(); - } - }); - }); - } - - /** - * Removes the default ACLs for the specified path - * @param path The path to remove the default ACLs for - */ - public removeDefaultAcl(path: string): Promise { - return new Promise((resolve, reject) => { - this.client.removeDefaultAcl(path, (error: HdfsError) => { - if (error) { - reject(error); - } else { - resolve(); - } - }); - }); - } - - /** - * Sets the permission octal (sticky, owner, group & other) for a file/folder - * @param path The path to the file/folder to set the permission of - * @param aclStatus The status containing the permission to set - */ - public setPermission(path: string, aclStatus: PermissionStatus): Promise { - return new Promise((resolve, reject) => { - this.client.setPermission(path, aclStatus, (error: HdfsError) => { - if (error) { - reject(error); - } else { - resolve(); - } - }); - }); - } -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/hdfsCommands.ts b/extensions/mssql/src/objectExplorerNodeProvider/hdfsCommands.ts deleted file mode 100644 index dbbed74f9f..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/hdfsCommands.ts +++ /dev/null @@ -1,418 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
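// Illustrative sketch, not part of the change above: the pattern repeated by the deleted
// HdfsFileSource methods — wrapping a Node-style callback call in a Promise that rejects
// on error and otherwise resolves the (possibly void) result.
function promisify<T>(call: (cb: (error: Error | undefined, result?: T) => void) => void): Promise<T | undefined> {
	return new Promise<T | undefined>((resolve, reject) => {
		call((error, result) => error ? reject(error) : resolve(result));
	});
}
// e.g., with a hypothetical client: await promisify<boolean>(cb => client.exists('/tmp', cb));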
- *--------------------------------------------------------------------------------------------*/ - -import * as vscode from 'vscode'; -import * as azdata from 'azdata'; -import { promises as fs } from 'fs'; -import * as fspath from 'path'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import { Command, ICommandViewContext, ProgressCommand, ICommandObjectExplorerContext } from './command'; -import { File, IFile, joinHdfsPath, FileType } from './fileSources'; -import { FolderNode, FileNode, HdfsFileSourceNode } from './hdfsProvider'; -import { IPrompter, IQuestion, QuestionTypes } from '../prompts/question'; -import * as constants from '../constants'; -import * as LocalizedConstants from '../localizedConstants'; -import * as utils from '../utils'; -import { AppContext } from '../appContext'; -import { TreeNode } from './treeNodes'; -import { MssqlObjectExplorerNodeProvider } from './objectExplorerNodeProvider'; -import { ManageAccessDialog } from '../hdfs/ui/hdfsManageAccessDialog'; - -async function getSaveableUri(fileName: string, isPreview?: boolean): Promise { - let root = utils.getUserHome(); - let workspaceFolders = vscode.workspace.workspaceFolders; - if (workspaceFolders && workspaceFolders.length > 0) { - root = workspaceFolders[0].uri.fsPath; - } - // Cannot preview with a file path that already exists, so keep looking for a valid path that does not exist - if (isPreview) { - let fileNum = 1; - let fileNameWithoutExtension = fspath.parse(fileName).name; - let fileExtension = fspath.parse(fileName).ext; - while (await utils.exists(fspath.join(root, fileName))) { - fileName = `${fileNameWithoutExtension}-${fileNum}${fileExtension}`; - fileNum++; - } - } - return vscode.Uri.file(fspath.join(root, fileName)); -} - -export async function getNode(context: ICommandViewContext | ICommandObjectExplorerContext, appContext: AppContext): Promise { - let node: T = undefined; - if (context && context.type === constants.ViewType && context.node) { - node = context.node as T; - } else if (context && context.type === constants.ObjectExplorerService) { - let oeNodeProvider = appContext.getService(constants.ObjectExplorerService); - if (oeNodeProvider) { - node = await oeNodeProvider.findSqlClusterNodeByContext(context); - } - } else { - throw new Error(LocalizedConstants.msgMissingNodeContext); - } - return node; -} - -export class UploadFilesCommand extends ProgressCommand { - - constructor(prompter: IPrompter, appContext: AppContext) { - super('mssqlCluster.uploadFiles', prompter, appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let folderNode = await getNode(context, this.appContext); - const allFilesFilter = localize('allFiles', "All Files"); - let filter: { [key: string]: string[] } = {}; - filter[allFilesFilter] = ['*']; - if (folderNode) { - let options: vscode.OpenDialogOptions = { - canSelectFiles: true, - canSelectFolders: false, - canSelectMany: true, - openLabel: localize('lblUploadFiles', "Upload"), - filters: filter - }; - let fileUris: vscode.Uri[] = await vscode.window.showOpenDialog(options); - if (fileUris) { - let files: IFile[] = await Promise.all(fileUris.map(uri => uri.fsPath).map(this.mapPathsToFiles())); - await this.executeWithProgress( - (cancelToken: vscode.CancellationTokenSource) 
=> this.writeFiles(files, folderNode, cancelToken), - localize('uploading', "Uploading files to HDFS"), true, - () => vscode.window.showInformationMessage(localize('uploadCanceled', "Upload operation was canceled"))); - if (context.type === constants.ObjectExplorerService) { - let objectExplorerNode = await azdata.objectexplorer.getNode(context.explorerContext.connectionProfile.id, folderNode.getNodeInfo().nodePath); - await objectExplorerNode.refresh(); - } - } - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('uploadError', "Error uploading files: {0}", utils.getErrorMessage(err, true))); - } - } - - private mapPathsToFiles(): (value: string, index: number, array: string[]) => Promise { - return async (path: string) => { - const stats = (await fs.lstat(path)); - if (stats.isDirectory()) { - return new File(path, FileType.Directory); - } else if (stats.isSymbolicLink()) { - return new File(path, FileType.Symlink); - } else { - return new File(path, FileType.File); - } - - }; - } - - private async writeFiles(files: IFile[], folderNode: FolderNode, cancelToken: vscode.CancellationTokenSource): Promise { - for (let file of files) { - if (cancelToken.token.isCancellationRequested) { - // Throw here so that all recursion is ended - throw new Error('Upload canceled'); - } - if (file.fileType === FileType.Directory) { - let dirName = fspath.basename(file.path); - let subFolder = await folderNode.mkdir(dirName); - let children: IFile[] = await Promise.all((await fs.readdir(file.path)) - .map(childFileName => joinHdfsPath(file.path, childFileName)) - .map(this.mapPathsToFiles())); - await this.writeFiles(children, subFolder, cancelToken); - } else { - await folderNode.writeFile(file); - } - } - } -} -export class MkDirCommand extends ProgressCommand { - - constructor(prompter: IPrompter, appContext: AppContext) { - super('mssqlCluster.mkdir', prompter, appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let folderNode = await getNode(context, this.appContext); - - if (folderNode) { - let fileName: string = await this.getDirName(); - if (fileName && fileName.length > 0) { - await this.executeWithProgress( - async (cancelToken: vscode.CancellationTokenSource) => this.mkDir(fileName, folderNode, cancelToken), - localize('makingDir', "Creating directory"), true, - () => vscode.window.showInformationMessage(localize('mkdirCanceled', "Operation was canceled"))); - if (context.type === constants.ObjectExplorerService) { - let objectExplorerNode = await azdata.objectexplorer.getNode(context.explorerContext.connectionProfile.id, folderNode.getNodeInfo().nodePath); - await objectExplorerNode.refresh(); - } - } - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('mkDirError', "Error on making directory: {0}", utils.getErrorMessage(err, true))); - } - } - - private async getDirName(): Promise { - return await this.prompter.promptSingle({ - type: QuestionTypes.input, - name: 'enterDirName', - message: localize('enterDirName', "Enter directory name"), - default: '' - }).then(confirmed => confirmed); - } - - private async mkDir(fileName: string, folderNode: FolderNode, cancelToken: vscode.CancellationTokenSource): Promise { - await folderNode.mkdir(fileName); - } -} - -export class DeleteFilesCommand extends Command { - 
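// Illustrative sketch, not part of the change above: the depth-first walk that the deleted
// UploadFilesCommand.writeFiles performs over local directories, checking cancellation
// before each entry so a single throw unwinds the whole recursion.
import { promises as fs } from 'fs';
import * as path from 'path';

async function walkLocalDir(dir: string, isCanceled: () => boolean, visit: (file: string) => Promise<void>): Promise<void> {
	for (const name of await fs.readdir(dir)) {
		if (isCanceled()) { throw new Error('Upload canceled'); } // ends all recursion
		const full = path.join(dir, name);
		if ((await fs.lstat(full)).isDirectory()) {
			await walkLocalDir(full, isCanceled, visit);
		} else {
			await visit(full);
		}
	}
}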
- constructor(private prompter: IPrompter, appContext: AppContext) { - super('mssqlCluster.deleteFiles', appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let node = await getNode(context, this.appContext); - if (node) { - // TODO ideally would let node define if it's deletable - // TODO also, would like to change this to getNodeInfo as OE is the primary use case now - let treeItem = await node.getTreeItem(); - let oeNodeToRefresh: azdata.objectexplorer.ObjectExplorerNode = undefined; - if (context.type === constants.ObjectExplorerService) { - let oeNodeToDelete = await azdata.objectexplorer.getNode(context.explorerContext.connectionProfile.id, node.getNodeInfo().nodePath); - oeNodeToRefresh = await oeNodeToDelete.getParent(); - } - switch (treeItem.contextValue) { - case constants.MssqlClusterItems.Folder: - await this.deleteFolder(node); - break; - case constants.MssqlClusterItems.File: - await this.deleteFile(node); - break; - default: - return; - } - if (oeNodeToRefresh) { - await oeNodeToRefresh.refresh(); - } - } else { - void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext); - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('deleteError', "Error on deleting files: {0}", utils.getErrorMessage(err, true))); - } - } - - private async confirmDelete(deleteMsg: string): Promise { - return await this.prompter.promptSingle({ - type: QuestionTypes.confirm, - message: deleteMsg, - default: false - }).then(confirmed => confirmed); - } - - private async deleteFolder(node: FolderNode): Promise { - if (node) { - let confirmed = await this.confirmDelete(localize('msgDeleteFolder', "Are you sure you want to delete this folder and its contents?")); - if (confirmed) { - // TODO prompt for recursive delete if non-empty? 
- await node.delete(true); - } - } - } - - private async deleteFile(node: FileNode): Promise { - if (node) { - let confirmed = await this.confirmDelete(localize('msgDeleteFile', "Are you sure you want to delete this file?")); - if (confirmed) { - await node.delete(); - } - } - } -} - -export class SaveFileCommand extends ProgressCommand { - - constructor(prompter: IPrompter, appContext: AppContext) { - super('mssqlCluster.saveFile', prompter, appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let fileNode = await getNode(context, this.appContext); - if (fileNode) { - let defaultUri = await getSaveableUri(fspath.basename(fileNode.hdfsPath)); - let fileUri: vscode.Uri = await vscode.window.showSaveDialog({ - defaultUri: defaultUri - }); - if (fileUri) { - await this.executeWithProgress( - (cancelToken: vscode.CancellationTokenSource) => this.doSaveAndOpen(fileUri, fileNode, cancelToken), - localize('saving', "Saving HDFS Files"), true, - () => vscode.window.showInformationMessage(localize('saveCanceled', "Save operation was canceled"))); - } - } else { - void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext); - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('saveError', "Error on saving file: {0}", utils.getErrorMessage(err, true))); - } - } - - private async doSaveAndOpen(fileUri: vscode.Uri, fileNode: FileNode, cancelToken: vscode.CancellationTokenSource): Promise { - await fileNode.writeFileContentsToDisk(fileUri.fsPath, cancelToken); - await vscode.commands.executeCommand('vscode.open', fileUri); - } -} - -export class PreviewFileCommand extends ProgressCommand { - public static readonly DefaultMaxSize = 30 * 1024 * 1024; - - constructor(prompter: IPrompter, appContext: AppContext) { - super('mssqlCluster.previewFile', prompter, appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let fileNode = await getNode(context, this.appContext); - if (fileNode) { - await this.executeWithProgress( - async (cancelToken: vscode.CancellationTokenSource) => { - let contents = await fileNode.getFileContentsAsString(PreviewFileCommand.DefaultMaxSize); - let fileName: string = fspath.basename(fileNode.hdfsPath); - if (fspath.extname(fileName) !== '.ipynb') { - const doc = await this.openTextDocument(fileName); - const options: vscode.TextDocumentShowOptions = { - viewColumn: vscode.ViewColumn.Active, - preserveFocus: false - }; - const editor = await vscode.window.showTextDocument(doc, options); - await editor.edit(edit => { - edit.insert(new vscode.Position(0, 0), contents); - }); - } else { - let connectionProfile: azdata.IConnectionProfile = undefined; - if (context.type === constants.ObjectExplorerService) { - connectionProfile = context.explorerContext.connectionProfile; - } - await this.showNotebookDocument(fileName, connectionProfile, contents); - } - }, - localize('previewing', "Generating preview"), - false); - } else { - void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext); - } - } catch (err) { - void 
vscode.window.showErrorMessage( - localize('previewError', "Error on previewing file: {0}", utils.getErrorMessage(err, true))); - } - } - - private async showNotebookDocument(fileName: string, connectionProfile?: azdata.IConnectionProfile, - initialContent?: string - ): Promise { - let docUri: vscode.Uri = (await getSaveableUri(fileName, true)) - .with({ scheme: constants.UNTITLED_SCHEMA }); - return await azdata.nb.showNotebookDocument(docUri, { - connectionProfile: connectionProfile, - preview: false, - initialContent: initialContent - }); - } - - private async openTextDocument(fileName: string): Promise { - let docUri: vscode.Uri = await getSaveableUri(fileName, true); - if (docUri) { - docUri = docUri.with({ scheme: constants.UNTITLED_SCHEMA }); - return await vscode.workspace.openTextDocument(docUri); - } else { - // Can't reliably create a filename to save as so just use untitled - let language = fspath.extname(fileName); - if (language && language.length > 0) { - // trim the '.' - language = language.substring(1); - } - return await vscode.workspace.openTextDocument({ - language: language - }); - } - } -} - -export class CopyPathCommand extends Command { - public static readonly DefaultMaxSize = 30 * 1024 * 1024; - - constructor(appContext: AppContext) { - super('mssqlCluster.copyPath', appContext); - } - - protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise { - return this.execute(context, args); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let node = await getNode(context, this.appContext); - if (node) { - let path = node.hdfsPath; - void vscode.env.clipboard.writeText(path); - } else { - void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext); - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('copyPathError', "Error on copying path: {0}", utils.getErrorMessage(err, true))); - } - } -} - -export class ManageAccessCommand extends Command { - - constructor(appContext: AppContext) { - super('mssqlCluster.manageAccess', appContext); - } - - async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise { - try { - let node = await getNode(context, this.appContext); - if (node) { - new ManageAccessDialog(node.hdfsPath, await node.getFileSource()).openDialog(); - } else { - void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext); - } - } catch (err) { - void vscode.window.showErrorMessage( - localize('manageAccessError', "An unexpected error occurred while opening the Manage Access dialog: {0}", utils.getErrorMessage(err, true))); - } - } -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/hdfsProvider.ts b/extensions/mssql/src/objectExplorerNodeProvider/hdfsProvider.ts deleted file mode 100644 index 99e78f0bfd..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/hdfsProvider.ts +++ /dev/null @@ -1,382 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
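// Illustrative sketch, not part of the change above: the routing rule in the deleted
// PreviewFileCommand — .ipynb content opens as a notebook document, anything else as an
// untitled text document seeded with the downloaded contents so nothing touches disk.
import * as vscode from 'vscode';
import * as fspath from 'path';

async function previewContents(fileName: string, contents: string): Promise<void> {
	// Derive the language from the extension, falling back to plain text.
	const language = fspath.extname(fileName).replace('.', '') || 'plaintext';
	const doc = await vscode.workspace.openTextDocument({ language, content: contents });
	await vscode.window.showTextDocument(doc, { viewColumn: vscode.ViewColumn.Active, preserveFocus: false });
}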
- *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import * as fspath from 'path'; -import * as fs from 'fs'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import * as Constants from '../constants'; -import { IFileSource, IFile, File, FileType } from './fileSources'; -import { CancelableStream } from './cancelableStream'; -import { TreeNode } from './treeNodes'; -import * as utils from '../utils'; -import { IFileNode } from './types'; -import { MountStatus } from '../hdfs/mount'; -import { SqlClusterSession } from './objectExplorerNodeProvider'; - -export interface ITreeChangeHandler { - notifyNodeChanged(node: TreeNode): void; -} -export class TreeDataContext { - - constructor(public extensionContext: vscode.ExtensionContext, public changeHandler: ITreeChangeHandler) { - - } -} - -export abstract class HdfsFileSourceNode extends TreeNode { - constructor(protected context: TreeDataContext, protected _path: string, fileSource: IFileSource | undefined, protected mountStatus?: MountStatus) { - super(fileSource); - } - - public get hdfsPath(): string { - return this._path; - } - - public get nodePathValue(): string { - return this.getDisplayName(); - } - - - protected isMounted(): boolean { - return this.mountStatus === MountStatus.Mount || this.mountStatus === MountStatus.Mount_Child; - } - - getDisplayName(): string { - return fspath.basename(this._path); - } - - public async delete(recursive: boolean = false): Promise { - const fileSource = await this.getFileSource(); - await fileSource.delete(this.hdfsPath, recursive); - // Notify parent should be updated. If at top, will return undefined which will refresh whole tree - (this.parent).onChildRemoved(); - this.context.changeHandler.notifyNodeChanged(this.parent); - } - public abstract onChildRemoved(): void; -} - -export class FolderNode extends HdfsFileSourceNode { - private children: TreeNode[] = []; - protected _nodeType: string; - constructor(context: TreeDataContext, path: string, fileSource: IFileSource | undefined, nodeType?: string, mountStatus?: MountStatus) { - super(context, path, fileSource, mountStatus); - this._nodeType = nodeType ? nodeType : Constants.MssqlClusterItems.Folder; - } - - public onChildRemoved(): void { - this.children = undefined; - } - - async getChildren(refreshChildren: boolean): Promise { - if (refreshChildren || !this.children) { - try { - const fileSource = await this.getFileSource(); - let files: IFile[] = await fileSource.enumerateFiles(this._path); - if (files) { - // Note: for now, assuming HDFS-provided sorting is sufficient - this.children = files.map((file) => { - let node: TreeNode = file.fileType === FileType.File ? 
- new FileNode(this.context, file.path, fileSource, this.getChildMountStatus(file)) : - new FolderNode(this.context, file.path, fileSource, Constants.MssqlClusterItems.Folder, this.getChildMountStatus(file)); - node.parent = this; - return node; - }); - } - } catch (error) { - this.children = [ErrorNode.create(localize('errorExpanding', "Error: {0}", utils.getErrorMessage(error)), this, error.statusCode)]; - } - } - return this.children; - } - - private getChildMountStatus(file: IFile): MountStatus { - if (file.mountStatus !== undefined && file.mountStatus !== MountStatus.None) { - return file.mountStatus; - } - else if (this.mountStatus !== undefined && this.mountStatus !== MountStatus.None) { - // Any child node of a mount (or subtree) must be a mount child - return MountStatus.Mount_Child; - } - return MountStatus.None; - } - - getTreeItem(): vscode.TreeItem | Promise { - let item = new vscode.TreeItem(this.getDisplayName(), vscode.TreeItemCollapsibleState.Collapsed); - // For now, folder always looks the same. We're using SQL icons to differentiate remote vs local files - item.iconPath = { - dark: this.context.extensionContext.asAbsolutePath('resources/light/Folder.svg'), - light: this.context.extensionContext.asAbsolutePath('resources/light/Folder.svg') - }; - item.contextValue = this._nodeType; - return item; - } - - getNodeInfo(): azdata.NodeInfo { - // TODO handle error message case by returning it in the OE API - // TODO support better mapping of node type - let nodeInfo: azdata.NodeInfo = { - label: this.getDisplayName(), - isLeaf: false, - errorMessage: undefined, - metadata: undefined, - nodePath: this.generateNodePath(), - nodeStatus: undefined, - nodeType: this._nodeType, - nodeSubType: this.getSubType(), - iconType: this.isMounted() ? 'Folder_mounted' : 'Folder' - }; - return nodeInfo; - } - - private getSubType(): string | undefined { - if (this.mountStatus === MountStatus.Mount) { - return Constants.MssqlClusterItemsSubType.Mount; - } else if (this.mountStatus === MountStatus.Mount_Child) { - return Constants.MssqlClusterItemsSubType.MountChild; - } - - return undefined; - } - - public async writeFile(localFile: IFile): Promise { - return this.runChildAddAction(() => this.writeFileAsync(localFile)); - } - - private async writeFileAsync(localFile: IFile): Promise { - const fileSource = await this.getFileSource(); - await fileSource.writeFile(localFile, this._path); - let fileNode = new FileNode(this.context, File.createPath(this._path, File.getBasename(localFile)), fileSource); - return fileNode; - } - - public async mkdir(name: string): Promise { - return this.runChildAddAction(() => this.mkdirAsync(name)); - } - - private async mkdirAsync(name: string): Promise { - const fileSource = await this.getFileSource(); - await fileSource.mkdir(name, this._path); - let subDir = new FolderNode(this.context, File.createPath(this._path, name), fileSource); - return subDir; - } - - private async runChildAddAction(action: () => Promise): Promise { - let node = await action(); - await this.getChildren(true); - if (this.children) { - // Find the child matching the node. This is necessary - // since writing can add duplicates. 
- node = this.children.find(n => n.nodePathValue === node.nodePathValue) as T; - this.context.changeHandler.notifyNodeChanged(this); - } else { - // Failed to retrieve children from server so something went wrong - node = undefined; - } - return node; - } -} - -export class ConnectionNode extends FolderNode { - - constructor(context: TreeDataContext, private displayName: string, private clusterSession: SqlClusterSession) { - super(context, '/', undefined, Constants.MssqlClusterItems.Connection); - } - - override getDisplayName(): string { - return this.displayName; - } - - public override async delete(): Promise { - throw new Error(localize('errDeleteConnectionNode', "Cannot delete a connection. Only subfolders and files can be deleted.")); - } - - override async getTreeItem(): Promise { - let item = await super.getTreeItem(); - item.contextValue = this._nodeType; - return item; - } - - public override async getFileSource(): Promise { - // The node is initially created without a filesource and then one is created only once an action is - // taken that requires a connection - const fileSource = await super.getFileSource(); - if (!fileSource) { - await this.updateFileSource(await this.clusterSession.getSqlClusterConnection()); - } - return super.getFileSource(); - } - - override getNodeInfo(): azdata.NodeInfo { - // TODO handle error message case by returning it in the OE API - // TODO support better mapping of node type - let nodeInfo: azdata.NodeInfo = { - label: this.getDisplayName(), - isLeaf: false, - errorMessage: undefined, - metadata: undefined, - nodePath: this.generateNodePath(), - nodeStatus: undefined, - nodeType: 'mssqlCluster:hdfs', - nodeSubType: undefined, - iconType: 'Folder' - }; - return nodeInfo; - } -} - -export class FileNode extends HdfsFileSourceNode implements IFileNode { - - constructor(context: TreeDataContext, path: string, fileSource: IFileSource, mountStatus?: MountStatus) { - super(context, path, fileSource, mountStatus); - } - - public onChildRemoved(): void { - // do nothing - } - - getChildren(refreshChildren: boolean): TreeNode[] | Promise { - return []; - } - - getTreeItem(): vscode.TreeItem | Promise { - let item = new vscode.TreeItem(this.getDisplayName(), vscode.TreeItemCollapsibleState.None); - item.iconPath = { - dark: this.context.extensionContext.asAbsolutePath('resources/dark/file_inverse.svg'), - light: this.context.extensionContext.asAbsolutePath('resources/light/file.svg') - }; - item.contextValue = Constants.MssqlClusterItems.File; - return item; - } - - - getNodeInfo(): azdata.NodeInfo { - // TODO improve node type handling so it's not tied to SQL Server types - let nodeInfo: azdata.NodeInfo = { - label: this.getDisplayName(), - isLeaf: true, - errorMessage: undefined, - metadata: undefined, - nodePath: this.generateNodePath(), - nodeStatus: undefined, - nodeType: Constants.MssqlClusterItems.File, - nodeSubType: this.getSubType(), - iconType: this.isMounted() ? 'FileGroupFile_mounted' : 'FileGroupFile' - }; - return nodeInfo; - } - - public async getFileContentsAsString(maxBytes?: number): Promise { - const fileSource = await this.getFileSource(); - let contents: Buffer = await fileSource.readFile(this.hdfsPath, maxBytes); - return contents ? contents.toString('utf8') : ''; - } - - public async getFileLinesAsString(maxLines: number): Promise { - const fileSource = await this.getFileSource(); - let contents: Buffer = await fileSource.readFileLines(this.hdfsPath, maxLines); - return contents ? 
contents.toString('utf8') : ''; - } - - public async writeFileContentsToDisk(localPath: string, cancelToken?: vscode.CancellationTokenSource): Promise { - const fileSource = await this.getFileSource(); - return new Promise((resolve, reject) => { - let readStream: fs.ReadStream = fileSource.createReadStream(this.hdfsPath); - readStream.on('error', (err) => { - reject(err); - }); - - let error: string | Error = undefined; - let writeStream = fs.createWriteStream(localPath, { - encoding: 'utf8' - }); - writeStream.on('error', (err) => { - error = err; - reject(error); - }); - writeStream.on('finish', () => { - if (!error) { - resolve(vscode.Uri.file(localPath)); - } - }); - - let cancelable = new CancelableStream(cancelToken); - cancelable.on('error', (err) => { - reject(err); - }); - - readStream.pipe(cancelable).pipe(writeStream); - }); - } - - private getSubType(): string | undefined { - let subType = ''; - if (this.getDisplayName().toLowerCase().endsWith('.jar') || this.getDisplayName().toLowerCase().endsWith('.py')) { - subType += Constants.MssqlClusterItemsSubType.Spark; - } else if (this.mountStatus === MountStatus.Mount_Child) { - subType += Constants.MssqlClusterItemsSubType.MountChild; - } - - return subType.length > 0 ? subType : undefined; - } -} - -class ErrorNode extends TreeNode { - static messageNum: number = 0; - - private _nodePathValue: string; - constructor(private message: string) { - super(undefined); - } - - public static create(message: string, parent: TreeNode, errorCode?: number): ErrorNode { - let node = new ErrorNode(message); - node.parent = parent; - if (errorCode) { - node.errorStatusCode = errorCode; - } - return node; - } - - private ensureNodePathValue(): void { - if (!this._nodePathValue) { - this._nodePathValue = `message_${ErrorNode.messageNum++}`; - } - } - - public get nodePathValue(): string { - this.ensureNodePathValue(); - return this._nodePathValue; - } - - public getChildren(refreshChildren: boolean): TreeNode[] | Promise { - return []; - } - - public getTreeItem(): vscode.TreeItem | Promise { - let item = new vscode.TreeItem(this.message, vscode.TreeItemCollapsibleState.None); - item.contextValue = Constants.MssqlClusterItems.Error; - return item; - } - - - getNodeInfo(): azdata.NodeInfo { - let nodeInfo: azdata.NodeInfo = { - label: this.message, - isLeaf: false, - errorMessage: undefined, - metadata: undefined, - nodePath: this.generateNodePath(), - nodeStatus: undefined, - nodeType: Constants.MssqlClusterItems.Error, - nodeSubType: undefined, - iconType: 'MessageType' - }; - return nodeInfo; - } -} diff --git a/extensions/mssql/src/objectExplorerNodeProvider/objectExplorerNodeProvider.ts b/extensions/mssql/src/objectExplorerNodeProvider/objectExplorerNodeProvider.ts deleted file mode 100644 index 2f170a8639..0000000000 --- a/extensions/mssql/src/objectExplorerNodeProvider/objectExplorerNodeProvider.ts +++ /dev/null @@ -1,328 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
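// Illustrative sketch, not part of the change above: the contract between the deleted
// ErrorNode and the node provider below — a failed expansion yields exactly one child
// carrying an HTTP status code, and a 401 triggers a credentials re-prompt.
function needsReauth(children: { errorStatusCode?: number }[]): boolean {
	return children.length === 1 && children[0].errorStatusCode === 401;
}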
- *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import * as nls from 'vscode-nls'; -const localize = nls.loadMessageBundle(); - -import { ProviderBase } from './providerBase'; -import { SqlClusterConnection } from './connection'; -import * as utils from '../utils'; -import { TreeNode } from './treeNodes'; -import { ConnectionNode, TreeDataContext, ITreeChangeHandler } from './hdfsProvider'; -import { AppContext } from '../appContext'; -import * as constants from '../constants'; -import { ICommandObjectExplorerContext } from './command'; -import { IPrompter, IQuestion, QuestionTypes } from '../prompts/question'; -import { getSqlClusterConnectionParams } from '../sqlClusterLookUp'; - -export const mssqlOutputChannel = vscode.window.createOutputChannel(constants.providerId); - -export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azdata.ObjectExplorerNodeProvider, ITreeChangeHandler { - public readonly supportedProviderId: string = constants.providerId; - private clusterSessionMap: Map<string, SqlClusterSession>; - private expandCompleteEmitter = new vscode.EventEmitter<azdata.ObjectExplorerExpandInfo>(); - - constructor(private prompter: IPrompter, private appContext: AppContext) { - super(); - this.clusterSessionMap = new Map(); - this.appContext.registerService(constants.ObjectExplorerService, this); - } - - handleSessionOpen(session: azdata.ObjectExplorerSession): Thenable<boolean> { - return new Promise((resolve, reject) => { - if (!session) { - reject('handleSessionOpen requires a session object to be passed'); - } else { - resolve(this.doSessionOpen(session)); - } - }); - } - - private async doSessionOpen(session: azdata.ObjectExplorerSession): Promise<boolean> { - if (!session || !session.sessionId) { return false; } - - let sqlConnProfile = await azdata.objectexplorer.getSessionConnectionProfile(session.sessionId); - if (!sqlConnProfile) { return false; } - - const isBigDataCluster = await utils.isBigDataCluster(sqlConnProfile.id); - if (!isBigDataCluster) { return false; } - - let clusterSession = new SqlClusterSession(session, sqlConnProfile, this.appContext, this); - this.clusterSessionMap.set(session.sessionId, clusterSession); - return true; - } - - expandNode(nodeInfo: azdata.ExpandNodeInfo, isRefresh: boolean = false): Thenable<boolean> { - return new Promise((resolve, reject) => { - if (!nodeInfo) { - reject('expandNode requires a nodeInfo object to be passed'); - } else { - resolve(this.doExpandNode(nodeInfo, isRefresh)); - } - }); - } - - private async doExpandNode(nodeInfo: azdata.ExpandNodeInfo, isRefresh: boolean = false): Promise<boolean> { - let session = this.clusterSessionMap.get(nodeInfo.sessionId); - let response: azdata.ObjectExplorerExpandInfo = { - sessionId: nodeInfo.sessionId, - nodePath: nodeInfo.nodePath, - errorMessage: undefined, - nodes: [] - }; - - if (!session) { - // This is not an error case. Just fire the response with empty nodes, for example for a request from a standalone SQL instance - this.expandCompleteEmitter.fire(response); - return false; - } else { - setTimeout(() => { - - // Running after promise resolution as we need the ADS-side map to have been updated - // Intentionally not awaiting or catching errors.
- // Any failure in startExpansion should be emitted in the expand complete result - // We want this to be async and ideally return true before it completes - this.startExpansion(session, nodeInfo, isRefresh).catch(err => console.log('Error expanding Object Explorer Node ', err)); - }, 10); - } - return true; - } - - private hasExpansionError(children: TreeNode[]): boolean { - if (children.find(c => c.errorStatusCode > 0)) { - return true; - } - return false; - } - - private async startExpansion(session: SqlClusterSession, nodeInfo: azdata.ExpandNodeInfo, isRefresh: boolean = false): Promise { - let expandResult: azdata.ObjectExplorerExpandInfo = { - sessionId: session.sessionId, - nodePath: nodeInfo.nodePath, - errorMessage: undefined, - nodes: [] - }; - try { - let node = await session.rootNode.findNodeByPath(nodeInfo.nodePath, true); - if (node) { - expandResult.errorMessage = node.getNodeInfo().errorMessage; - let children = await node.getChildren(true); - if (children && children.length > 0) { - // Only child returned when failure happens : When failed with 'Unauthorized' error, prompt for password. - if (children.length === 1 && this.hasExpansionError(children)) { - if (children[0].errorStatusCode === 401) { - const sqlClusterConnection = await session.getSqlClusterConnection(); - // First prompt for username (defaulting to existing username) - let username = await this.prompter.promptSingle({ - type: QuestionTypes.input, - name: 'inputPrompt', - message: localize('promptUsername', "Please provide the username to connect to HDFS:"), - default: sqlClusterConnection.user - }); - // Only update the username if it's different than the original (the update functions ignore falsy values) - if (username === sqlClusterConnection.user) { - username = ''; - } - sqlClusterConnection.updateUsername(username); - - // And then prompt for password - const password = await this.prompter.promptSingle({ - type: QuestionTypes.password, - name: 'passwordPrompt', - message: localize('prmptPwd', "Please provide the password to connect to HDFS:"), - default: '' - }); - sqlClusterConnection.updatePassword(password); - - if (username || password) { - await node.updateFileSource(sqlClusterConnection); - children = await node.getChildren(true); - } - } - } - - expandResult.nodes = children.map(c => c.getNodeInfo()); - if (children.length === 1 && this.hasExpansionError(children)) { - let child = children[0].getNodeInfo(); - expandResult.errorMessage = child ? 
child.label : 'Unknown Error'; - expandResult.nodes = []; - } - } - } - } catch (error) { - expandResult.errorMessage = utils.getErrorMessage(error); - } - this.expandCompleteEmitter.fire(expandResult); - } - - refreshNode(nodeInfo: azdata.ExpandNodeInfo): Thenable { - // TODO #3815 implement properly - return this.expandNode(nodeInfo, true); - } - - handleSessionClose(closeSessionInfo: azdata.ObjectExplorerCloseSessionInfo): void { - this.clusterSessionMap.delete(closeSessionInfo.sessionId); - } - - findNodes(findNodesInfo: azdata.FindNodesInfo): Thenable { - // TODO #3814 implement - let response: azdata.ObjectExplorerFindNodesResponse = { - nodes: [] - }; - return Promise.resolve(response); - } - - registerOnExpandCompleted(handler: (response: azdata.ObjectExplorerExpandInfo) => any): void { - this.expandCompleteEmitter.event(handler); - } - - notifyNodeChanged(node: TreeNode): void { - void this.notifyNodeChangesAsync(node); - } - - private async notifyNodeChangesAsync(node: TreeNode): Promise { - try { - let session = this.getSqlClusterSessionForNode(node); - if (!session) { - void vscode.window.showErrorMessage(localize('sessionNotFound', "Session for node {0} does not exist", node.nodePathValue)); - } else { - let nodeInfo = node.getNodeInfo(); - let expandInfo: azdata.ExpandNodeInfo = { - nodePath: nodeInfo.nodePath, - sessionId: session.sessionId - }; - await this.refreshNode(expandInfo); - } - } catch (err) { - mssqlOutputChannel.appendLine(localize('notifyError', "Error notifying of node change: {0}", err)); - } - } - - private getSqlClusterSessionForNode(node: TreeNode): SqlClusterSession { - let sqlClusterSession: SqlClusterSession = undefined; - while (node !== undefined) { - if (node instanceof SqlClusterRootNode) { - sqlClusterSession = node.session; - break; - } else { - node = node.parent; - } - } - return sqlClusterSession; - } - - async findSqlClusterNodeByContext(context: ICommandObjectExplorerContext | azdata.ObjectExplorerContext): Promise { - let node: T = undefined; - let explorerContext = 'explorerContext' in context ? 
-
-	notifyNodeChanged(node: TreeNode): void {
-		void this.notifyNodeChangesAsync(node);
-	}
-
-	private async notifyNodeChangesAsync(node: TreeNode): Promise<void> {
-		try {
-			let session = this.getSqlClusterSessionForNode(node);
-			if (!session) {
-				void vscode.window.showErrorMessage(localize('sessionNotFound', "Session for node {0} does not exist", node.nodePathValue));
-			} else {
-				let nodeInfo = node.getNodeInfo();
-				let expandInfo: azdata.ExpandNodeInfo = {
-					nodePath: nodeInfo.nodePath,
-					sessionId: session.sessionId
-				};
-				await this.refreshNode(expandInfo);
-			}
-		} catch (err) {
-			mssqlOutputChannel.appendLine(localize('notifyError', "Error notifying of node change: {0}", err));
-		}
-	}
-
-	private getSqlClusterSessionForNode(node: TreeNode): SqlClusterSession {
-		let sqlClusterSession: SqlClusterSession = undefined;
-		while (node !== undefined) {
-			if (node instanceof SqlClusterRootNode) {
-				sqlClusterSession = node.session;
-				break;
-			} else {
-				node = node.parent;
-			}
-		}
-		return sqlClusterSession;
-	}
-
-	async findSqlClusterNodeByContext<T extends TreeNode>(context: ICommandObjectExplorerContext | azdata.ObjectExplorerContext): Promise<T> {
-		let node: T = undefined;
-		let explorerContext = 'explorerContext' in context ? context.explorerContext : context;
-		let sqlConnProfile = explorerContext.connectionProfile;
-		let session = this.findSqlClusterSessionBySqlConnProfile(sqlConnProfile);
-		if (session) {
-			if (explorerContext.isConnectionNode) {
-				// Note: ideally fix so we verify T matches RootNode and go from there
-				node = <T>session.rootNode;
-			} else {
-				// Find the node under the session
-				node = <T>(await session.rootNode.findNodeByPath(explorerContext.nodeInfo.nodePath, true));
-			}
-		}
-		return node;
-	}
-
-	public findSqlClusterSessionBySqlConnProfile(connectionProfile: azdata.IConnectionProfile): SqlClusterSession | undefined {
-		for (let session of this.clusterSessionMap.values()) {
-			if (session.isMatchedSqlConnection(connectionProfile)) {
-				return session;
-			}
-		}
-		return undefined;
-	}
-}
-
-export class SqlClusterSession {
-	private _rootNode: SqlClusterRootNode;
-	private _sqlClusterConnection: SqlClusterConnection | undefined = undefined;
-	constructor(
-		private _sqlSession: azdata.ObjectExplorerSession,
-		private _sqlConnectionProfile: azdata.IConnectionProfile,
-		private _appContext: AppContext,
-		private _changeHandler: ITreeChangeHandler
-	) {
-		this._rootNode = new SqlClusterRootNode(this,
-			new TreeDataContext(this._appContext.extensionContext, this._changeHandler),
-			this._sqlSession.rootNode.nodePath);
-	}
-
-	public async getSqlClusterConnection(): Promise<SqlClusterConnection> {
-		if (!this._sqlClusterConnection) {
-			const sqlClusterConnectionParams = await getSqlClusterConnectionParams(this._sqlConnectionProfile, this._appContext);
-			this._sqlClusterConnection = new SqlClusterConnection(sqlClusterConnectionParams);
-		}
-		return this._sqlClusterConnection;
-	}
-	public get sqlSession(): azdata.ObjectExplorerSession { return this._sqlSession; }
-	public get sqlConnectionProfile(): azdata.IConnectionProfile { return this._sqlConnectionProfile; }
-	public get sessionId(): string { return this._sqlSession.sessionId; }
-	public get rootNode(): SqlClusterRootNode { return this._rootNode; }
-
-	public isMatchedSqlConnection(sqlConnProfile: azdata.IConnectionProfile): boolean {
-		return this._sqlConnectionProfile.id === sqlConnProfile.id;
-	}
-}
-
-class SqlClusterRootNode extends TreeNode {
-	private _children: TreeNode[];
-	constructor(
-		private _session: SqlClusterSession,
-		private _treeDataContext: TreeDataContext,
-		private _nodePathValue: string
-	) {
-		super(undefined);
-	}
-
-	public get session(): SqlClusterSession {
-		return this._session;
-	}
-
-	public get nodePathValue(): string {
-		return this._nodePathValue;
-	}
-
-	public getChildren(refreshChildren: boolean): TreeNode[] | Promise<TreeNode[]> {
-		if (refreshChildren || !this._children) {
-			return this.refreshChildren();
-		}
-		return this._children;
-	}
-
-	private async refreshChildren(): Promise<TreeNode[]> {
-		this._children = [];
-
-		let hdfsNode = new ConnectionNode(this._treeDataContext, localize('hdfsFolder', "HDFS"), this.session);
-		hdfsNode.parent = this;
-		this._children.push(hdfsNode);
-		return this._children;
-	}
-
-	getTreeItem(): vscode.TreeItem | Promise<vscode.TreeItem> {
-		throw new Error('Not intended for use in a file explorer view.');
-	}
-
-	getNodeInfo(): azdata.NodeInfo {
-		let nodeInfo: azdata.NodeInfo = {
-			label: localize('rootLabel', "Root"),
-			isLeaf: false,
-			errorMessage: undefined,
-			metadata: undefined,
-			nodePath: this.generateNodePath(),
-			nodeStatus: undefined,
-			nodeType: 'sqlCluster:root',
-			nodeSubType: undefined,
-			iconType: 'folder'
-		};
-		return nodeInfo;
-	}
-}
diff --git a/extensions/mssql/src/objectExplorerNodeProvider/providerBase.ts b/extensions/mssql/src/objectExplorerNodeProvider/providerBase.ts
deleted file mode 100644
index ee9491d24d..0000000000
--- a/extensions/mssql/src/objectExplorerNodeProvider/providerBase.ts
+++ /dev/null
@@ -1,11 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as constants from '../constants';
-
-export abstract class ProviderBase {
-	public readonly providerId: string = constants.mssqlClusterProviderName;
-	public handle: number;
-}
diff --git a/extensions/mssql/src/objectExplorerNodeProvider/treeNodes.ts b/extensions/mssql/src/objectExplorerNodeProvider/treeNodes.ts
deleted file mode 100644
index f9b64e856f..0000000000
--- a/extensions/mssql/src/objectExplorerNodeProvider/treeNodes.ts
+++ /dev/null
@@ -1,97 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import { ITreeNode } from './types';
-import { IFileSource } from './fileSources';
-import { SqlClusterConnection } from './connection';
-
-type TreeNodePredicate = (node: TreeNode) => boolean;
-
-export abstract class TreeNode implements ITreeNode {
-	private _parent: TreeNode = undefined;
-	private _errorStatusCode: number;
-
-	constructor(private _fileSource: IFileSource | undefined) { }
-
-	public get parent(): TreeNode {
-		return this._parent;
-	}
-
-	public set parent(node: TreeNode) {
-		this._parent = node;
-	}
-
-	public get errorStatusCode(): number {
-		return this._errorStatusCode;
-	}
-
-	public set errorStatusCode(error: number) {
-		this._errorStatusCode = error;
-	}
-
-	public generateNodePath(): string {
-		let path = undefined;
-		if (this.parent) {
-			path = this.parent.generateNodePath();
-		}
-		path = path ? `${path}/${this.nodePathValue}` : this.nodePathValue;
-		return path;
-	}
-
-	public findNodeByPath(path: string, expandIfNeeded: boolean = false): Promise<TreeNode> {
-		let condition: TreeNodePredicate = (node: TreeNode) => node.getNodeInfo().nodePath === path || node.getNodeInfo().nodePath.startsWith(path);
-		let filter: TreeNodePredicate = (node: TreeNode) => path.startsWith(node.getNodeInfo().nodePath);
-		return TreeNode.findNode(this, condition, filter, true);
-	}
-
-	public static async findNode(node: TreeNode, condition: TreeNodePredicate, filter: TreeNodePredicate, expandIfNeeded: boolean): Promise<TreeNode> {
-		if (!node) {
-			return undefined;
-		}
-
-		if (condition(node)) {
-			return node;
-		}
-
-		let nodeInfo = node.getNodeInfo();
-		if (nodeInfo.isLeaf) {
-			return undefined;
-		}
-
-		// TODO #3813 support filtering by already expanded / not yet expanded
-		let children = await node.getChildren(false);
-		if (children) {
-			for (let child of children) {
-				if (filter && filter(child)) {
-					let childNode = await this.findNode(child, condition, filter, expandIfNeeded);
-					if (childNode) {
-						return childNode;
-					}
-				}
-			}
-		}
-		return undefined;
-	}
-
-	public async updateFileSource(connection: SqlClusterConnection): Promise<void> {
-		this._fileSource = await connection.createHdfsFileSource();
-	}
-
-	public async getFileSource(): Promise<IFileSource> {
-		return this._fileSource;
-	}
-
-	/**
-	 * The value to use for this node in the node path
-	 */
-	public abstract get nodePathValue(): string;
-
-	abstract getChildren(refreshChildren: boolean): TreeNode[] | Promise<TreeNode[]>;
-	abstract getTreeItem(): vscode.TreeItem | Promise<vscode.TreeItem>;
-
-	abstract getNodeInfo(): azdata.NodeInfo;
-}
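
findNodeByPath above drives a pruned depth-first search: condition matches the target node, while filter skips subtrees that cannot contain it. A toy sketch of the same traversal, with hypothetical types:

type Predicate<T> = (node: T) => boolean;

interface SimpleNode { path: string; children: SimpleNode[]; }

function findNode(node: SimpleNode, condition: Predicate<SimpleNode>, filter: Predicate<SimpleNode>): SimpleNode | undefined {
	if (condition(node)) { return node; }
	for (const child of node.children) {
		if (filter(child)) { // prune: only descend into branches that prefix the target path
			const match = findNode(child, condition, filter);
			if (match) { return match; }
		}
	}
	return undefined;
}

// For a target path 'root/a/b', filter keeps children whose path is a prefix of the target:
// findNode(root, n => n.path === 'root/a/b', n => 'root/a/b'.startsWith(n.path));
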
diff --git a/extensions/mssql/src/objectExplorerNodeProvider/types.d.ts b/extensions/mssql/src/objectExplorerNodeProvider/types.d.ts
deleted file mode 100644
index 87218a84fc..0000000000
--- a/extensions/mssql/src/objectExplorerNodeProvider/types.d.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as vscode from 'vscode';
-import * as azdata from 'azdata';
-
-/**
- * A tree node in the object explorer tree
- *
- * @export
- */
-export interface ITreeNode {
-	getNodeInfo(): azdata.NodeInfo;
-	getChildren(refreshChildren: boolean): ITreeNode[] | Promise<ITreeNode[]>;
-}
-
-/**
- * An HDFS file node. This is a leaf node in the object explorer tree, and its contents
- * can be queried
- *
- * @export
- * @extends {ITreeNode}
- */
-export interface IFileNode extends ITreeNode {
-	getFileContentsAsString(maxBytes?: number): Promise<string>;
-}
diff --git a/extensions/mssql/src/prompts/adapter.ts b/extensions/mssql/src/prompts/adapter.ts
deleted file mode 100644
index 36db367e93..0000000000
--- a/extensions/mssql/src/prompts/adapter.ts
+++ /dev/null
@@ -1,70 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import { window } from 'vscode';
-import PromptFactory from './factory';
-import EscapeException from '../escapeException';
-import { IQuestion, IPrompter } from './question';
-
-// Supports a simple pattern for prompting for user input and acting on this
-export default class CodeAdapter implements IPrompter {
-
-	// TODO define question interface
-	private fixQuestion(question: IQuestion): any {
-		if (question.type === 'checkbox' && Array.isArray(question.choices)) {
-			// For some reason when there's a choice of checkboxes, they aren't formatted properly
-			// Not sure where the issue is
-			question.choices = question.choices.map(item => {
-				if (typeof (item) === 'string') {
-					return { checked: false, name: item, value: item };
-				} else {
-					return item;
-				}
-			});
-		}
-	}
-
-	public promptSingle<T>(question: IQuestion, ignoreFocusOut?: boolean): Promise<T> {
-		let questions: IQuestion[] = [question];
-		return this.prompt<T>(questions, ignoreFocusOut).then((answers: { [key: string]: T }) => {
-			if (answers) {
-				let response: T = answers[question.name];
-				return response || undefined;
-			}
-			return undefined;
-		});
-	}
-
-	public prompt<T>(questions: IQuestion[], ignoreFocusOut?: boolean): Promise<{ [key: string]: T }> {
-		let answers: { [key: string]: T } = {};
-
-		// Collapse multiple questions into a set of prompt steps
-		let promptResult: Promise<{ [key: string]: T }> = questions.reduce((promise: Promise<{ [key: string]: T }>, question: IQuestion) => {
-			this.fixQuestion(question);
-
-			return promise.then(() => {
-				return PromptFactory.createPrompt(question, ignoreFocusOut);
-			}).then(prompt => {
-				if (!question.shouldPrompt || question.shouldPrompt(answers) === true) {
-					return prompt.render().then((result: T) => {
-						answers[question.name] = result;
-
-						if (question.onAnswered) {
-							question.onAnswered(result);
-						}
-						return answers;
-					});
-				}
-				return answers;
-			});
-		}, Promise.resolve());
-
-		return promptResult.catch(err => {
-			if (err instanceof EscapeException || err instanceof TypeError) {
-				return undefined;
-			}
-
-			void window.showErrorMessage(err.message);
-		});
-	}
-}
diff --git a/extensions/mssql/src/prompts/confirm.ts b/extensions/mssql/src/prompts/confirm.ts
deleted file mode 100644
index a8b00aeb66..0000000000
--- a/extensions/mssql/src/prompts/confirm.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import * as nls from 'vscode-nls';
-const localize = nls.loadMessageBundle();
-
-import { window } from 'vscode';
-import Prompt from './prompt';
-import EscapeException from '../escapeException';
-
-export default class ConfirmPrompt extends Prompt {
-
-	constructor(question: any, ignoreFocusOut?: boolean) {
-		super(question, ignoreFocusOut);
-	}
-
-	public render(): any {
-		let choices: { [id: string]: boolean } = {};
-		choices[localize('msgYes', 'Yes')] = true;
-		choices[localize('msgNo', 'No')] = false;
-
-		let options = this.defaultQuickPickOptions;
-		options.placeHolder = this._question.message;
-
-		return window.showQuickPick(Object.keys(choices), options)
-			.then(result => {
-				if (result === undefined) {
-					throw new EscapeException();
-				}
-
-				return choices[result] || false;
-			});
-	}
-}
diff --git a/extensions/mssql/src/prompts/factory.ts b/extensions/mssql/src/prompts/factory.ts
deleted file mode 100644
index d527a5e54d..0000000000
--- a/extensions/mssql/src/prompts/factory.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import Prompt from './prompt';
-import InputPrompt from './input';
-import PasswordPrompt from './password';
-import ConfirmPrompt from './confirm';
-import { IQuestion } from './question';
-
-export default class PromptFactory {
-
-	public static createPrompt(question: IQuestion, ignoreFocusOut?: boolean): Prompt {
-		switch (question.type) {
-			case 'input':
-				return new InputPrompt(question, ignoreFocusOut);
-			case 'password':
-				return new PasswordPrompt(question, ignoreFocusOut);
-			case 'confirm':
-				return new ConfirmPrompt(question, ignoreFocusOut);
-			default:
-				throw new Error(`Could not find a prompt for question type ${question.type}`);
-		}
-	}
-}
diff --git a/extensions/mssql/src/prompts/input.ts b/extensions/mssql/src/prompts/input.ts
deleted file mode 100644
index 409d1ea55d..0000000000
--- a/extensions/mssql/src/prompts/input.ts
+++ /dev/null
@@ -1,57 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import { window, InputBoxOptions } from 'vscode';
-import Prompt from './prompt';
-import EscapeException from '../escapeException';
-
-const figures = require('figures');
-
-export default class InputPrompt extends Prompt {
-
-	protected _options: InputBoxOptions;
-
-	constructor(question: any, ignoreFocusOut?: boolean) {
-		super(question, ignoreFocusOut);
-
-		this._options = this.defaultInputBoxOptions;
-		this._options.prompt = this._question.message;
-	}
-
-	// Helper for callers to know the right type to get from the type factory
-	public static get promptType(): string { return 'input'; }
-
-	public render(): any {
-		// Prefer default over the placeHolder, if specified
-		let placeHolder = this._question.default ? this._question.default : this._question.placeHolder;
-
-		if (this._question.default instanceof Error) {
-			placeHolder = this._question.default.message;
-			this._question.default = undefined;
-		}
-
-		this._options.placeHolder = placeHolder;
-
-		return window.showInputBox(this._options)
-			.then(result => {
-				if (result === undefined) {
-					throw new EscapeException();
-				}
-
-				if (result === '') {
-					// Use the default value, if defined
-					result = this._question.default || '';
-				}
-
-				const validationError = this._question.validate ? this._question.validate(result || '') : undefined;
-
-				if (validationError) {
-					this._question.default = new Error(`${figures.warning} ${validationError}`);
-
-					return this.render();
-				}
-
-				return result;
-			});
-	}
-}
diff --git a/extensions/mssql/src/prompts/password.ts b/extensions/mssql/src/prompts/password.ts
deleted file mode 100644
index cd084c2542..0000000000
--- a/extensions/mssql/src/prompts/password.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import InputPrompt from './input';
-
-export default class PasswordPrompt extends InputPrompt {
-
-	constructor(question: any, ignoreFocusOut?: boolean) {
-		super(question, ignoreFocusOut);
-
-		this._options.password = true;
-	}
-}
diff --git a/extensions/mssql/src/prompts/prompt.ts b/extensions/mssql/src/prompts/prompt.ts
deleted file mode 100644
index ceb7adb467..0000000000
--- a/extensions/mssql/src/prompts/prompt.ts
+++ /dev/null
@@ -1,32 +0,0 @@
-// This code is originally from https://github.com/DonJayamanne/bowerVSCode
-// License: https://github.com/DonJayamanne/bowerVSCode/blob/master/LICENSE
-
-import { InputBoxOptions, QuickPickOptions } from 'vscode';
-import { IQuestion } from './question';
-
-abstract class Prompt {
-
-	protected _question: IQuestion;
-	protected _ignoreFocusOut?: boolean;
-
-	constructor(question: IQuestion, ignoreFocusOut?: boolean) {
-		this._question = question;
-		this._ignoreFocusOut = ignoreFocusOut ? ignoreFocusOut : false;
-	}
-
-	public abstract render(): any;
-
-	protected get defaultQuickPickOptions(): QuickPickOptions {
-		return {
-			ignoreFocusOut: this._ignoreFocusOut
-		};
-	}
-
-	protected get defaultInputBoxOptions(): InputBoxOptions {
-		return {
-			ignoreFocusOut: this._ignoreFocusOut
-		};
-	}
-}
-
-export default Prompt;
diff --git a/extensions/mssql/src/prompts/question.ts b/extensions/mssql/src/prompts/question.ts
deleted file mode 100644
index aa1082368e..0000000000
--- a/extensions/mssql/src/prompts/question.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as vscode from 'vscode';
-
-export class QuestionTypes {
-	public static get input(): string { return 'input'; }
-	public static get password(): string { return 'password'; }
-	public static get confirm(): string { return 'confirm'; }
-}
-
-// Question interface to clarify how to use the prompt feature
-// based on the Bower Question format: https://github.com/bower/bower/blob/89069784bb46bfd6639b4a75e98a0d7399a8c2cb/packages/bower-logger/README.md
-export interface IQuestion {
-	// Type of question (see QuestionTypes)
-	type: string;
-	// Name of the question for disambiguation
-	name: string;
-	// Message to display to the user
-	message: string;
-	// Optional placeHolder to give more detailed information to the user
-	placeHolder?: any;
-	// Optional default value - this will be used instead of placeHolder
-	default?: any;
-	// Optional set of choices to be used. Can be QuickPickItems or a simple name-value pair
-	choices?: Array<vscode.QuickPickItem | INameValueChoice>;
-	// Optional validation function that returns an error string if validation fails
-	validate?: (value: any) => string;
-	// Optional pre-prompt function. Takes in the set of answers so far, and returns true if the prompt should occur
-	shouldPrompt?: (answers: { [id: string]: any }) => boolean;
-	// Optional action to take on the question being answered
-	onAnswered?: (value: any) => void;
-	// Optional set of options to support matching choices.
-	matchOptions?: vscode.QuickPickOptions;
-}
-
-// Pair used to display simple choices to the user
-interface INameValueChoice {
-	name: string;
-	value: any;
-}
-
-export interface IPrompter {
-	promptSingle<T>(question: IQuestion, ignoreFocusOut?: boolean): Promise<T>;
-	/**
-	 * Prompts for multiple questions
-	 *
-	 * @returns Map of question IDs to results, or undefined if
	 * the user canceled the question session
-	 */
-	prompt(questions: IQuestion[], ignoreFocusOut?: boolean): Promise<{ [questionId: string]: any }>;
-}
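
The IQuestion/IPrompter contract above is easiest to see with a short usage sketch (a hypothetical caller, not part of the deleted files):

import CodeAdapter from './adapter';
import { QuestionTypes } from './question';

async function confirmOverwrite(): Promise<boolean> {
	const prompter = new CodeAdapter();
	const answer = await prompter.promptSingle<boolean>({
		type: QuestionTypes.confirm,
		name: 'overwrite',
		message: 'Overwrite the existing file?'
	});
	// Treat anything but an explicit true as "no": escapes are swallowed by the
	// adapter, and a false answer collapses to undefined via `response || undefined`.
	return answer === true;
}
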
diff --git a/extensions/mssql/src/sparkFeature/dialog/dialogCommands.ts b/extensions/mssql/src/sparkFeature/dialog/dialogCommands.ts
deleted file mode 100644
index 92e83f35bd..0000000000
--- a/extensions/mssql/src/sparkFeature/dialog/dialogCommands.ts
+++ /dev/null
@@ -1,172 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-import * as vscode from 'vscode';
-const localize = nls.loadMessageBundle();
-
-import { ICommandViewContext, Command, ICommandObjectExplorerContext, ICommandUnknownContext } from '../../objectExplorerNodeProvider/command';
-import { SparkJobSubmissionDialog } from './sparkJobSubmission/sparkJobSubmissionDialog';
-import { AppContext } from '../../appContext';
-import { getErrorMessage } from '../../utils';
-import * as constants from '../../constants';
-import { HdfsFileSourceNode } from '../../objectExplorerNodeProvider/hdfsProvider';
-import { getNode } from '../../objectExplorerNodeProvider/hdfsCommands';
-import * as LocalizedConstants from '../../localizedConstants';
-import * as SqlClusterLookUp from '../../sqlClusterLookUp';
-import { SqlClusterConnection } from '../../objectExplorerNodeProvider/connection';
-
-interface MssqlOptions {
-	server: string;
-}
-
-const timeout = (millis: number) => new Promise(c => setTimeout(c, millis));
-
-export class OpenSparkJobSubmissionDialogCommand extends Command {
-	constructor(appContext: AppContext, private outputChannel: vscode.OutputChannel) {
-		super(constants.mssqlClusterLivySubmitSparkJobCommand, appContext);
-	}
-
-	protected override async preExecute(context: ICommandUnknownContext | ICommandObjectExplorerContext, args: object = {}): Promise<void> {
-		return this.execute(context, args);
-	}
-
-	async execute(context: ICommandUnknownContext | ICommandObjectExplorerContext, ...args: any[]): Promise<void> {
-		try {
-			let sqlClusterConnection: SqlClusterConnection = undefined;
-			if (context.type === constants.ObjectExplorerService) {
-				sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(context, this.appContext);
-			}
-			if (!sqlClusterConnection) {
-				sqlClusterConnection = await this.selectConnection();
-			}
-
-			let dialog = new SparkJobSubmissionDialog(sqlClusterConnection, this.appContext, this.outputChannel);
-			await dialog.openDialog();
-		} catch (error) {
-			void vscode.window.showErrorMessage(getErrorMessage(error));
-		}
-	}
-
-	private async selectConnection(): Promise<SqlClusterConnection> {
-		let connectionList: azdata.connection.Connection[] = await azdata.connection.getActiveConnections();
-		let connectionMap: Map<string, azdata.connection.Connection> = new Map();
-		let selectedHost: string = undefined;
-		let showConnectionDialog = false;
-
-		// Filter out invalid connections
-		if (connectionList && connectionList.length > 0) {
-			connectionList = connectionList.filter(conn => conn.providerName === constants.sqlProviderName && (<MssqlOptions>conn.options).server);
-		}
-		// Prompt for a choice if we have active connections
-		if (connectionList && connectionList.length > 0) {
-			let selectConnectionMsg = localize('selectOtherServer', "Select another SQL Server");
-			let displayList: string[] = [];
-			connectionList.forEach(conn => {
-				let options: MssqlOptions = conn.options;
-				displayList.push(options.server);
-				connectionMap.set(options.server, conn);
-			});
-			displayList.push(selectConnectionMsg);
-
-			selectedHost = await vscode.window.showQuickPick(displayList, {
-				placeHolder:
-					localize('sparkJobSubmission.PleaseSelectSqlWithCluster',
-						"Please select a SQL Server with a Big Data Cluster.")
-			});
-			if (selectedHost === selectConnectionMsg) {
-				showConnectionDialog = true;
-				selectedHost = undefined;
-			}
-		} else {
-			showConnectionDialog = true;
-		}
-
-		// Show the connection dialog if we still don't have a server
-		if (showConnectionDialog) {
-			let connection = await azdata.connection.openConnectionDialog([constants.sqlProviderName]);
-			if (connection) {
-				let options: MssqlOptions = connection.options;
-				connectionMap.set(options.server, connection);
-				selectedHost = options.server;
-				// Wait an appropriate timeout so that the serverInfo object can populate...
-				await timeout(150);
-			}
-		}
-
-		let errorMsg = localize('sparkJobSubmission.NoSqlSelected', "No SQL Server is selected.");
-		if (!selectedHost) { throw new Error(errorMsg); }
-
-		let sqlConnection = connectionMap.get(selectedHost);
-		if (!sqlConnection) { throw new Error(errorMsg); }
-
-		let sqlClusterConnection = await SqlClusterLookUp.getSqlClusterConnectionParams(sqlConnection, this.appContext);
-		if (!sqlClusterConnection) {
-			throw new Error(localize('errorNotSqlBigDataCluster', "The selected server does not belong to a SQL Server Big Data Cluster"));
-		}
-
-		return new SqlClusterConnection(sqlClusterConnection);
-	}
-}
-
-// Open the submission dialog for a specific file path.
-export class OpenSparkJobSubmissionDialogFromFileCommand extends Command {
-	constructor(appContext: AppContext, private outputChannel: vscode.OutputChannel) {
-		super(constants.mssqlClusterLivySubmitSparkJobFromFileCommand, appContext);
-	}
-
-	protected override async preExecute(context: ICommandViewContext | ICommandObjectExplorerContext, args: object = {}): Promise<void> {
-		return this.execute(context, args);
-	}
-
-	async execute(context: ICommandViewContext | ICommandObjectExplorerContext, ...args: any[]): Promise<void> {
-		let path: string = undefined;
-		try {
-			let node = await getNode<HdfsFileSourceNode>(context, this.appContext);
-			if (node && node.hdfsPath) {
-				path = node.hdfsPath;
-			} else {
-				void vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext);
-				return;
-			}
-		} catch (err) {
-			void vscode.window.showErrorMessage(localize('sparkJobSubmission.GetFilePathFromSelectedNodeFailed', "Error getting the file path: {0}", err));
-			return;
-		}
-
-		try {
-			let sqlClusterConnection: SqlClusterConnection = undefined;
-			if (context.type === constants.ObjectExplorerService) {
-				sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(context, this.appContext);
-			}
-			if (!sqlClusterConnection) {
-				throw new Error(LocalizedConstants.sparkJobSubmissionNoSqlBigDataClusterFound);
-			}
-			let dialog = new SparkJobSubmissionDialog(sqlClusterConnection, this.appContext, this.outputChannel);
-			await dialog.openDialog(path);
-		} catch (error) {
-			void vscode.window.showErrorMessage(getErrorMessage(error));
-		}
-	}
-}
-
-export class OpenSparkJobSubmissionDialogTask {
-	constructor(private appContext: AppContext, private outputChannel: vscode.OutputChannel) {
-	}
-
-	async execute(profile: azdata.IConnectionProfile, ...args: any[]): Promise<void> {
-		try {
-			let sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(profile, this.appContext);
-			if (!sqlClusterConnection) {
-				throw new Error(LocalizedConstants.sparkJobSubmissionNoSqlBigDataClusterFound);
-			}
-			let dialog = new SparkJobSubmissionDialog(sqlClusterConnection, this.appContext, this.outputChannel);
-			await dialog.openDialog();
-		} catch (error) {
-			void vscode.window.showErrorMessage(getErrorMessage(error));
-		}
-	}
-}
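
selectConnection above waits a fixed 150 ms hoping serverInfo has populated after the connection dialog closes. A more robust alternative, sketched here under the assumption that azdata.connection.getServerInfo is available, is to poll with a deadline instead of a single fixed delay:

import * as azdata from 'azdata';

const timeout = (millis: number) => new Promise<void>(c => setTimeout(c, millis));

// Poll until serverInfo is available or the deadline expires.
async function waitForServerInfo(connectionId: string, deadlineMs: number = 1500): Promise<azdata.ServerInfo | undefined> {
	const start = Date.now();
	while (Date.now() - start < deadlineMs) {
		const info = await azdata.connection.getServerInfo(connectionId);
		if (info) { return info; }
		await timeout(50);
	}
	return undefined; // caller decides whether missing serverInfo is fatal
}
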
diff --git a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkAdvancedTab.ts b/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkAdvancedTab.ts
deleted file mode 100644
index b7c4eecfc9..0000000000
--- a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkAdvancedTab.ts
+++ /dev/null
@@ -1,192 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-const localize = nls.loadMessageBundle();
-
-/**
- * Configuration values for the advanced tab of the spark job submission dialog.
- * See https://livy.incubator.apache.org/docs/latest/rest-api.html for more information
- * on the specific values
- */
-export interface SparkAdvancedConfigModel {
-	jarFiles?: string,
-	pyFiles?: string,
-	otherFiles?: string,
-	driverMemory?: string,
-	driverCores?: number,
-	executorMemory?: string,
-	executeCores?: number,
-	executorCount?: number,
-	queueName?: string,
-	configValues?: string
-}
-
-const baseFormItemLayout: azdata.FormItemLayout = {
-	horizontal: false,
-	componentWidth: '400px'
-};
-export class SparkAdvancedTab {
-	private _tab: azdata.window.DialogTab;
-	public get tab(): azdata.window.DialogTab { return this._tab; }
-
-	private _referenceFilesInputBox: azdata.InputBoxComponent;
-	private _referenceJARFilesInputBox: azdata.InputBoxComponent;
-	private _referencePyFilesInputBox: azdata.InputBoxComponent;
-	private _driverMemoryInputBox: azdata.InputBoxComponent;
-	private _driverCoresInputBox: azdata.InputBoxComponent;
-	private _executorMemoryInputBox: azdata.InputBoxComponent;
-	private _executorCoresInputBox: azdata.InputBoxComponent;
-	private _executorCountInputBox: azdata.InputBoxComponent;
-	private _queueInputBox: azdata.InputBoxComponent;
-	private _configValuesInputBox: azdata.InputBoxComponent;
-
-	constructor() {
-		this._tab = azdata.window.createTab(localize('sparkJobSubmission.AdvancedTabName', "ADVANCED"));
-
-		this._tab.registerContent(async (modelView) => {
-			let builder = modelView.modelBuilder;
-
-			let formContainer = builder.formContainer();
-
-			this._referenceJARFilesInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._referenceJARFilesInputBox,
-					title: localize('sparkJobSubmission.ReferenceJarList', "Reference Jars")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.ReferenceJarListToolTip',
-						"Jars to be placed in the executor working directory. The jar path must be an HDFS path. Multiple paths should be separated by a semicolon (;)")
-				});
-
-			this._referencePyFilesInputBox = builder.inputBox().component();
-			formContainer.addFormItem({
-				component: this._referencePyFilesInputBox,
-				title: localize('sparkJobSubmission.ReferencePyList', "Reference py Files")
-			},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.ReferencePyListTooltip',
-						"Py files to be placed in the executor working directory. The file path must be an HDFS path. Multiple paths should be separated by a semicolon (;)")
-				});
-
-			this._referenceFilesInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._referenceFilesInputBox,
-					title: localize('sparkJobSubmission.ReferenceFilesList', "Reference Files")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.ReferenceFilesListTooltip',
-						"Files to be placed in the executor working directory. The file path must be an HDFS path. Multiple paths should be separated by a semicolon (;)")
-				});
-
-			this._driverMemoryInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._driverMemoryInputBox,
-					title: localize('sparkJobSubmission.driverMemory', "Driver Memory")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.driverMemoryTooltip', "Amount of memory to allocate to the driver. Specify units as part of the value, for example 512M or 2G.")
-				});
-
-			this._driverCoresInputBox = builder.inputBox()
-				.withProps({ inputType: 'number', min: 1 })
-				.component();
-
-			formContainer.addFormItem(
-				{
-					component: this._driverCoresInputBox,
-					title: localize('sparkJobSubmission.driverCores', "Driver Cores")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.driverCoresTooltip', "Number of CPU cores to allocate to the driver.")
-				});
-
-			this._executorMemoryInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._executorMemoryInputBox,
-					title: localize('sparkJobSubmission.executorMemory', "Executor Memory")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.executorMemoryTooltip', "Amount of memory to allocate to the executor. Specify units as part of the value, for example 512M or 2G.")
-				});
-
-			this._executorCoresInputBox = builder.inputBox()
-				.withProps({ inputType: 'number', min: 1 })
-				.component();
-			formContainer.addFormItem(
-				{
-					component: this._executorCoresInputBox,
-					title: localize('sparkJobSubmission.executorCores', "Executor Cores")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.executorCoresTooltip', "Number of CPU cores to allocate to the executor.")
-				});
-
-			this._executorCountInputBox = builder.inputBox()
-				.withProps({ inputType: 'number', min: 1 })
-				.component();
-			formContainer.addFormItem(
-				{
-					component: this._executorCountInputBox,
-					title: localize('sparkJobSubmission.executorCount', "Executor Count")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.executorCountTooltip', "Number of instances of the executor to run.")
-				});
-
-			this._queueInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._queueInputBox,
-					title: localize('sparkJobSubmission.queueName', "Queue Name")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.queueNameTooltip', "Name of the Spark queue to execute the session in.")
-				});
-
-			this._configValuesInputBox = builder.inputBox().component();
-			formContainer.addFormItem(
-				{
-					component: this._configValuesInputBox,
-					title: localize('sparkJobSubmission.configValues', "Configuration Values")
-				},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.configValuesTooltip', "List of name-value pairs containing Spark configuration values, encoded as a JSON dictionary. Example: '{\"name\":\"value\", \"name2\":\"value2\"}'.")
-				});
-
-			await modelView.initializeModel(formContainer.component());
-		});
-	}
-
-	public getAdvancedConfigValues(): SparkAdvancedConfigModel {
-		return {
-			jarFiles: this._referenceJARFilesInputBox.value,
-			pyFiles: this._referencePyFilesInputBox.value,
-			otherFiles: this._referenceFilesInputBox.value,
-			driverMemory: this._driverMemoryInputBox.value,
-			driverCores: +this._driverCoresInputBox.value,
-			executorMemory: this._executorMemoryInputBox.value,
-			executeCores: +this._executorCoresInputBox.value,
-			executorCount: +this._executorCountInputBox.value,
-			queueName: this._queueInputBox.value,
-			configValues: this._configValuesInputBox.value
-		};
-	}
-}
diff --git a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkConfigurationTab.ts b/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkConfigurationTab.ts
deleted file mode 100644
index 8cbca6f345..0000000000
--- a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkConfigurationTab.ts
+++ /dev/null
@@ -1,288 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-import * as fspath from 'path';
-import * as vscode from 'vscode';
-import * as utils from '../../../utils';
-import * as LocalizedConstants from '../../../localizedConstants';
-import * as constants from '../../../constants';
-
-import { SparkJobSubmissionModel } from './sparkJobSubmissionModel';
-import { SparkFileSource } from './sparkJobSubmissionService';
-
-const localize = nls.loadMessageBundle();
-
-/**
- * Configuration values for the general tab of the spark job submission dialog.
- * See https://livy.incubator.apache.org/docs/latest/rest-api.html for more information
- * on the specific values
- */
-export interface SparkConfigModel {
-	jobName: string,
-	mainClass: string,
-	arguments: string
-}
-
-const baseFormItemLayout: azdata.FormItemLayout = {
-	horizontal: false,
-	componentWidth: '400px'
-};
-
-export class SparkConfigurationTab {
-	private _tab: azdata.window.DialogTab;
-	public get tab(): azdata.window.DialogTab { return this._tab; }
-
-	private _jobNameInputBox: azdata.InputBoxComponent;
-	private _sparkContextLabel: azdata.TextComponent;
-	private _fileSourceDropDown: azdata.DropDownComponent;
-	private _sparkSourceFileInputBox: azdata.InputBoxComponent;
-	private _filePickerButton: azdata.ButtonComponent;
-	private _sourceFlexContainer: azdata.FlexContainer;
-	private _sourceFlexContainerWithHint: azdata.FlexContainer;
-	private _localUploadDestinationLabel: azdata.TextComponent;
-	private _mainClassInputBox: azdata.InputBoxComponent;
-	private _argumentsInputBox: azdata.InputBoxComponent;
-
-	// If a path is specified, the default source setting for this tab is an HDFS file; otherwise it is a local file.
-	constructor(private _dataModel: SparkJobSubmissionModel, private _path?: string) {
-		this._tab = azdata.window.createTab(localize('sparkJobSubmission.GeneralTabName', "GENERAL"));
-
-		this._tab.registerContent(async (modelView) => {
-			let builder = modelView.modelBuilder;
-
-			let formContainer = builder.formContainer();
-
-			this._jobNameInputBox = builder.inputBox().withProps({
-				placeHolder: localize('sparkJobSubmission.JobNamePlaceHolder', "Enter a name ..."),
-				value: (this._path) ? fspath.basename(this._path) : ''
-			}).component();
-
-			formContainer.addFormItem({
-				component: this._jobNameInputBox,
-				title: localize('sparkJobSubmission.JobName', "Job Name"),
-				required: true
-			}, baseFormItemLayout);
-
-			this._sparkContextLabel = builder.text().withProps({
-				value: this._dataModel.getSparkClusterUrl()
-			}).component();
-			formContainer.addFormItem({
-				component: this._sparkContextLabel,
-				title: localize('sparkJobSubmission.SparkCluster', "Spark Cluster")
-			}, baseFormItemLayout);
-
-			this._fileSourceDropDown = builder.dropDown().withProps({
-				values: [SparkFileSource.Local.toString(), SparkFileSource.HDFS.toString()],
-				value: (this._path) ? SparkFileSource.HDFS.toString() : SparkFileSource.Local.toString()
-			}).component();
-
-			this._fileSourceDropDown.onValueChanged(async selection => {
-				let isLocal = selection.selected === SparkFileSource.Local.toString();
-				// Disable the browse button for the remote (HDFS) source.
-				if (this._filePickerButton) {
-					await this._filePickerButton.updateProperties({
-						enabled: isLocal,
-						required: isLocal
-					});
-				}
-
-				// Clear the path when switching sources.
-				if (this._sparkSourceFileInputBox) {
-					this._sparkSourceFileInputBox.value = '';
-				}
-
-				if (this._localUploadDestinationLabel) {
-					if (isLocal) {
-						this._localUploadDestinationLabel.value = LocalizedConstants.sparkLocalFileDestinationHint;
-					} else {
-						this._localUploadDestinationLabel.value = '';
-					}
-				}
-			});
-
-			this._sparkSourceFileInputBox = builder.inputBox().withProps({
-				required: true,
-				placeHolder: localize('sparkJobSubmission.FilePathPlaceHolder', "Path to a .jar or .py file"),
-				value: (this._path) ? this._path : ''
-			}).component();
-			this._sparkSourceFileInputBox.onTextChanged(async text => {
-				if (this._fileSourceDropDown.value === SparkFileSource.Local.toString()) {
-					this._dataModel.updateModelByLocalPath(text);
-					if (this._localUploadDestinationLabel) {
-						if (text) {
-							this._localUploadDestinationLabel.value = localize('sparkJobSubmission.LocalFileDestinationHintWithPath',
-								"The selected local file will be uploaded to HDFS: {0}", this._dataModel.hdfsSubmitFilePath);
-						} else {
-							this._localUploadDestinationLabel.value = LocalizedConstants.sparkLocalFileDestinationHint;
-						}
-					}
-				} else {
-					this._dataModel.hdfsSubmitFilePath = text;
-				}
-
-				// Enable or disable the main class field depending on whether the file is a jar.
-				let isJarFile = this._dataModel.isJarFile();
-				await this._mainClassInputBox.updateProperties({ enabled: isJarFile, required: isJarFile });
-				if (!isJarFile) {
-					// Clear the main class for a py file.
-					this._mainClassInputBox.value = '';
-				}
-			});
-
-			this._filePickerButton = builder.button().withProps({
-				enabled: (this._path) ? false : true,
-				label: '•••',
-				width: constants.mssqlClusterSparkJobFileSelectorButtonWidth,
-				height: constants.mssqlClusterSparkJobFileSelectorButtonHeight,
-				secondary: true
-			}).component();
-			this._filePickerButton.onDidClick(() => this.onSelectFile());
-
-			this._sourceFlexContainer = builder.flexContainer().component();
-			this._sourceFlexContainer.addItem(this._fileSourceDropDown, { flex: '0 0 auto', CSSStyles: { 'minWidth': '75px', 'marginBottom': '5px', 'paddingRight': '3px' } });
-			this._sourceFlexContainer.addItem(this._sparkSourceFileInputBox, { flex: '1 1 auto', CSSStyles: { 'marginBottom': '5px', 'paddingRight': '3px' } });
-			// Do not add a margin for the file picker button, as the label forces it to have a 5px margin
-			this._sourceFlexContainer.addItem(this._filePickerButton, { flex: '0 0 auto' });
-			this._sourceFlexContainer.setLayout({
-				flexFlow: 'row',
-				height: '100%',
-				justifyContent: 'center',
-				alignItems: 'center',
-				alignContent: 'stretch'
-			});
-
-			this._localUploadDestinationLabel = builder.text().withProps({
-				value: (this._path) ? '' : LocalizedConstants.sparkLocalFileDestinationHint
-			}).component();
-			this._sourceFlexContainerWithHint = builder.flexContainer().component();
-			this._sourceFlexContainerWithHint.addItem(this._sourceFlexContainer, { flex: '0 0 auto' });
-			this._sourceFlexContainerWithHint.addItem(this._localUploadDestinationLabel, { flex: '1 1 auto' });
-			this._sourceFlexContainerWithHint.setLayout({
-				flexFlow: 'column',
-				width: '100%',
-				justifyContent: 'center',
-				alignItems: 'stretch',
-				alignContent: 'stretch'
-			});
-
-			formContainer.addFormItem({
-				component: this._sourceFlexContainerWithHint,
-				title: localize('sparkJobSubmission.MainFilePath', "JAR/py File"),
-				required: true
-			}, baseFormItemLayout);
-
-			this._mainClassInputBox = builder.inputBox().component();
-			formContainer.addFormItem({
-				component: this._mainClassInputBox,
-				title: localize('sparkJobSubmission.MainClass', "Main Class"),
-				required: true
-			}, baseFormItemLayout);
-
-			this._argumentsInputBox = builder.inputBox().component();
-			formContainer.addFormItem({
-				component: this._argumentsInputBox,
-				title: localize('sparkJobSubmission.Arguments', "Arguments")
-			},
-				{
-					...baseFormItemLayout,
-					info: localize('sparkJobSubmission.ArgumentsTooltip', "Command-line arguments used by your main class; multiple arguments should be separated by spaces.")
-				});
-
-			await modelView.initializeModel(formContainer.component());
-		});
-	}
-
-	public async validate(): Promise<boolean> {
-		if (!this._jobNameInputBox.value) {
-			this._dataModel.showDialogError(localize('sparkJobSubmission.NotSpecifyJobName', "Property Job Name is not specified."));
-			return false;
-		}
-
-		if (this._fileSourceDropDown.value === SparkFileSource.Local.toString()) {
-			if (this._sparkSourceFileInputBox.value) {
-				this._dataModel.isMainSourceFromLocal = true;
-				this._dataModel.updateModelByLocalPath(this._sparkSourceFileInputBox.value);
-			} else {
-				this._dataModel.showDialogError(localize('sparkJobSubmission.NotSpecifyJARPYPath', "Property JAR/py File is not specified."));
-				return false;
-			}
-		} else {
-			if (this._sparkSourceFileInputBox.value) {
-				this._dataModel.isMainSourceFromLocal = false;
-				this._dataModel.hdfsSubmitFilePath = this._sparkSourceFileInputBox.value;
-			} else {
-				this._dataModel.showDialogError(localize('sparkJobSubmission.NotSpecifyJARPYPath', "Property JAR/py File is not specified."));
-				return false;
-			}
-		}
-
-		if (this._dataModel.isJarFile() && !this._mainClassInputBox.value) {
-			this._dataModel.showDialogError(localize('sparkJobSubmission.NotSpecifyMainClass', "Property Main Class is not specified."));
-			return false;
-		}
-
-		// 1. For a local file source, check whether the file exists.
-		if (this._dataModel.isMainSourceFromLocal) {
-			if (!(await utils.exists(this._dataModel.localFileSourcePath))) {
-				this._dataModel.showDialogError(LocalizedConstants.sparkJobSubmissionLocalFileNotExisted(this._dataModel.localFileSourcePath));
-				return false;
-			}
-		} else {
-			// 2. For an HDFS source, check that the file exists in the cluster.
-			try {
-				let isFileExisted = await this._dataModel.isClusterFileExisted(this._dataModel.hdfsSubmitFilePath);
-				if (!isFileExisted) {
-					this._dataModel.showDialogError(localize('sparkJobSubmission.HDFSFileNotExistedWithPath', "{0} does not exist in the cluster, or an exception was thrown.", this._dataModel.hdfsSubmitFilePath));
-					return false;
-				}
-			} catch (error) {
-				this._dataModel.showDialogError(localize('sparkJobSubmission.HDFSFileNotExisted', "The specified HDFS file does not exist."));
-				return false;
-			}
-		}
-
-		return true;
-	}
-
-	private async onSelectFile(): Promise<void> {
-		let filePath = await this.pickFile();
-		if (filePath) {
-			this._sparkSourceFileInputBox.value = filePath;
-		}
-	}
-
-	public getSparkConfigValues(): SparkConfigModel {
-		return {
-			jobName: this._jobNameInputBox.value ?? '',
-			mainClass: this._mainClassInputBox.value ?? '',
-			arguments: this._argumentsInputBox.value ?? ''
-		};
-	}
-
-	public async pickFile(): Promise<string> {
-		try {
-			let filter = { 'JAR/py files': ['jar', 'py'] };
-			let options: vscode.OpenDialogOptions = {
-				canSelectFiles: true,
-				canSelectFolders: false,
-				canSelectMany: false,
-				openLabel: localize('sparkSelectLocalFile', "Select"),
-				filters: filter
-			};
-
-			let fileUris: vscode.Uri[] = await vscode.window.showOpenDialog(options);
-			if (fileUris && fileUris[0]) {
-				return fileUris[0].fsPath;
-			}
-
-			return undefined;
-		} catch (err) {
-			void vscode.window.showErrorMessage(localize('sparkJobSubmission.SelectFileError', "Error locating the file: {0}", utils.getErrorMessage(err)));
-			return undefined;
-		}
-	}
-}
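
validate() above is consumed by the dialog below through registerCloseValidator. A minimal sketch of that wiring, with a hypothetical tab type:

import * as azdata from 'azdata';

// A dialog only closes on Submit if the tab's validate() resolves to true.
function wireCloseValidation(dialog: azdata.window.Dialog, tab: { validate(): Promise<boolean> }): void {
	// registerCloseValidator accepts a boolean or a Thenable<boolean>;
	// returning false keeps the dialog open so the user can fix the form.
	dialog.registerCloseValidator(() => tab.validate());
}
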
diff --git a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionDialog.ts b/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionDialog.ts
deleted file mode 100644
index 62c9827844..0000000000
--- a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionDialog.ts
+++ /dev/null
@@ -1,166 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import * as nls from 'vscode-nls';
-import * as utils from '../../../utils';
-import * as LocalizedConstants from '../../../localizedConstants';
-
-import { AppContext } from '../../../appContext';
-import { SparkJobSubmissionModel } from './sparkJobSubmissionModel';
-import { SparkConfigurationTab } from './sparkConfigurationTab';
-import { SparkJobSubmissionInput } from './sparkJobSubmissionService';
-import { SparkAdvancedTab } from './sparkAdvancedTab';
-import { SqlClusterConnection } from '../../../objectExplorerNodeProvider/connection';
-
-const localize = nls.loadMessageBundle();
-
-export class SparkJobSubmissionDialog {
-	private _dialog: azdata.window.Dialog;
-	private _dataModel: SparkJobSubmissionModel;
-	private _sparkConfigTab: SparkConfigurationTab;
-	private _sparkAdvancedTab: SparkAdvancedTab;
-
-	constructor(
-		private sqlClusterConnection: SqlClusterConnection,
-		private appContext: AppContext,
-		private outputChannel: vscode.OutputChannel) {
-		if (!this.sqlClusterConnection || !this.appContext || !this.outputChannel) {
-			throw new Error(localize('sparkJobSubmission.SparkJobSubmissionDialogInitializeError',
-				"The parameters for SparkJobSubmissionDialog are invalid"));
-		}
-	}
-
-	public async openDialog(path?: string): Promise<void> {
-		this._dialog = azdata.window.createModelViewDialog(localize('sparkJobSubmission.DialogTitleNewJob', "New Job"));
-
-		this._dataModel = new SparkJobSubmissionModel(this.sqlClusterConnection, this._dialog, this.appContext);
-
-		this._sparkConfigTab = new SparkConfigurationTab(this._dataModel, path);
-		this._sparkAdvancedTab = new SparkAdvancedTab();
-
-		this._dialog.content = [this._sparkConfigTab.tab, this._sparkAdvancedTab.tab];
-
-		this._dialog.cancelButton.label = localize('sparkJobSubmission.DialogCancelButton', "Cancel");
-
-		this._dialog.okButton.label = localize('sparkJobSubmission.DialogSubmitButton', "Submit");
-		this._dialog.okButton.onClick(() => this.onClickOk());
-
-		this._dialog.registerCloseValidator(() => this.handleValidate());
-
-		azdata.window.openDialog(this._dialog);
-	}
-
-	private onClickOk(): void {
-		let jobName = localize('sparkJobSubmission.SubmitSparkJob', "{0} Spark Job Submission:",
-			this._sparkConfigTab.getSparkConfigValues().jobName);
-		azdata.tasks.startBackgroundOperation(
-			{
-				connection: this.sqlClusterConnection.connection,
-				displayName: jobName,
-				description: jobName,
-				isCancelable: false,
-				operation: op => {
-					void this.onSubmit(op);
-				}
-			}
-		);
-	}
-
-	private async onSubmit(op: azdata.BackgroundOperation): Promise<void> {
-		try {
-			this.outputChannel.show();
-			let msg = localize('sparkJobSubmission.SubmissionStartMessage',
-				".......................... Submit Spark Job Start ..........................");
-			this.outputChannel.appendLine(msg);
-			// 1. For a local source, upload the local file to HDFS.
-			if (this._dataModel.isMainSourceFromLocal) {
-				try {
-					this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionPrepareUploadingFile(this._dataModel.localFileSourcePath, this._dataModel.hdfsFolderDestinationPath)));
-					op.updateStatus(azdata.TaskStatus.InProgress, LocalizedConstants.sparkJobSubmissionPrepareUploadingFile(this._dataModel.localFileSourcePath, this._dataModel.hdfsFolderDestinationPath));
-					await this._dataModel.uploadFile(this._dataModel.localFileSourcePath, this._dataModel.hdfsFolderDestinationPath);
-					void vscode.window.showInformationMessage(LocalizedConstants.sparkJobSubmissionUploadingFileSucceeded);
-					this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionUploadingFileSucceeded));
-					op.updateStatus(azdata.TaskStatus.InProgress, LocalizedConstants.sparkJobSubmissionUploadingFileSucceeded);
-				} catch (error) {
-					void vscode.window.showErrorMessage(LocalizedConstants.sparkJobSubmissionUploadingFileFailed(utils.getErrorMessage(error)));
-					this.outputChannel.appendLine(this.addErrorTag(LocalizedConstants.sparkJobSubmissionUploadingFileFailed(utils.getErrorMessage(error))));
-					op.updateStatus(azdata.TaskStatus.Failed, LocalizedConstants.sparkJobSubmissionUploadingFileFailed(utils.getErrorMessage(error)));
-					this.outputChannel.appendLine(LocalizedConstants.sparkJobSubmissionEndMessage);
-					return;
-				}
-			}
-
-			// 2. Submit the job to the cluster.
-			let submissionSettings: SparkJobSubmissionInput = this.getSubmissionInput();
-			this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionPrepareSubmitJob(submissionSettings.config.jobName)));
-			op.updateStatus(azdata.TaskStatus.InProgress, LocalizedConstants.sparkJobSubmissionPrepareSubmitJob(submissionSettings.config.jobName));
-			let livyBatchId = await this._dataModel.submitBatchJobByLivy(submissionSettings);
-			void vscode.window.showInformationMessage(LocalizedConstants.sparkJobSubmissionSparkJobHasBeenSubmitted);
-			this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionSparkJobHasBeenSubmitted));
-			op.updateStatus(azdata.TaskStatus.InProgress, LocalizedConstants.sparkJobSubmissionSparkJobHasBeenSubmitted);
-
-			// 3. Get the Spark History and Yarn UI URLs.
-			try {
-				let appId = await this._dataModel.getApplicationID(submissionSettings, livyBatchId);
-
-				let sparkHistoryUrl = this._dataModel.generateSparkHistoryUIUrl(submissionSettings, appId);
-				void vscode.window.showInformationMessage(LocalizedConstants.sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryUrl));
-				this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryUrl)));
-				op.updateStatus(azdata.TaskStatus.Succeeded, LocalizedConstants.sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryUrl));
-
-				/*
-				// The Spark tracking URL is not working right now.
-				let sparkTrackingUrl = this._dataModel.generateSparkTrackingUIUrl(submissionSettings, appId);
-				vscode.window.showInformationMessage(LocalizedConstants.sparkJobSubmissionTrackingLinkMessage(sparkTrackingUrl));
-				this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionTrackingLinkMessage(sparkTrackingUrl)));
-				op.updateStatus(azdata.TaskStatus.Succeeded, LocalizedConstants.sparkJobSubmissionTrackingLinkMessage(sparkTrackingUrl));
-				*/
-
-				let yarnUIUrl = this._dataModel.generateYarnUIUrl(submissionSettings, appId);
-				void vscode.window.showInformationMessage(LocalizedConstants.sparkJobSubmissionYarnUIMessage(yarnUIUrl));
-				this.outputChannel.appendLine(this.addInfoTag(LocalizedConstants.sparkJobSubmissionYarnUIMessage(yarnUIUrl)));
-				op.updateStatus(azdata.TaskStatus.Succeeded, LocalizedConstants.sparkJobSubmissionYarnUIMessage(yarnUIUrl));
-			} catch (error) {
-				void vscode.window.showErrorMessage(LocalizedConstants.sparkJobSubmissionGetApplicationIdFailed(utils.getErrorMessage(error)));
-				this.outputChannel.appendLine(this.addErrorTag(LocalizedConstants.sparkJobSubmissionGetApplicationIdFailed(utils.getErrorMessage(error))));
-				op.updateStatus(azdata.TaskStatus.Failed, LocalizedConstants.sparkJobSubmissionGetApplicationIdFailed(utils.getErrorMessage(error)));
-				this.outputChannel.appendLine(LocalizedConstants.sparkJobSubmissionEndMessage);
-				return;
-			}
-
-			this.outputChannel.appendLine(LocalizedConstants.sparkJobSubmissionEndMessage);
-		} catch (error) {
-			void vscode.window.showErrorMessage(LocalizedConstants.sparkJobSubmissionSubmitJobFailed(utils.getErrorMessage(error)));
-			this.outputChannel.appendLine(this.addErrorTag(LocalizedConstants.sparkJobSubmissionSubmitJobFailed(utils.getErrorMessage(error))));
-			op.updateStatus(azdata.TaskStatus.Failed, LocalizedConstants.sparkJobSubmissionSubmitJobFailed(utils.getErrorMessage(error)));
-			this.outputChannel.appendLine(LocalizedConstants.sparkJobSubmissionEndMessage);
-		}
-	}
-
-	private async handleValidate(): Promise<boolean> {
-		return this._sparkConfigTab.validate();
-	}
-
-	private getSubmissionInput(): SparkJobSubmissionInput {
-		const generalConfig = this._sparkConfigTab.getSparkConfigValues();
-		const advancedConfig = this._sparkAdvancedTab.getAdvancedConfigValues();
-		return new SparkJobSubmissionInput(
-			{
-				sparkFile: this._dataModel.hdfsSubmitFilePath,
-				...generalConfig,
-				...advancedConfig
-			});
-	}
-
-	private addInfoTag(info: string): string {
-		return `[Info] ${info}`;
-	}
-
-	private addErrorTag(error: string): string {
-		return `[Error] ${error}`;
-	}
-}
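
The onClickOk handler above returns immediately and hands the actual submission to azdata's background operation API, reporting progress through op.updateStatus. A minimal sketch of that pattern (a hypothetical helper, with the connection omitted):

import * as azdata from 'azdata';

function runInBackground(displayName: string, work: (op: azdata.BackgroundOperation) => Promise<void>): void {
	azdata.tasks.startBackgroundOperation({
		displayName,
		description: displayName,
		isCancelable: false,
		operation: op => {
			// The operation callback must not throw; surface failures through the task status.
			work(op).catch(err => op.updateStatus(azdata.TaskStatus.Failed, String(err)));
		}
	});
}
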
diff --git a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionModel.ts b/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionModel.ts
deleted file mode 100644
index 4c3494ec3d..0000000000
--- a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionModel.ts
+++ /dev/null
@@ -1,202 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-const localize = nls.loadMessageBundle();
-import * as fspath from 'path';
-import * as os from 'os';
-
-import * as constants from '../../../constants';
-import { SqlClusterConnection } from '../../../objectExplorerNodeProvider/connection';
-import * as LocalizedConstants from '../../../localizedConstants';
-import * as utils from '../../../utils';
-import { SparkJobSubmissionService, SparkJobSubmissionInput, LivyLogResponse } from './sparkJobSubmissionService';
-import { AppContext } from '../../../appContext';
-import { IFileSource, File, joinHdfsPath, FileType } from '../../../objectExplorerNodeProvider/fileSources';
-
-
-// Stores important state and service methods used by the Spark Job Submission Dialog.
-export class SparkJobSubmissionModel {
-	private _dialogService: SparkJobSubmissionService;
-	private _guidForClusterFolder: string;
-	public get guidForClusterFolder(): string { return this._guidForClusterFolder; }
-
-	// Whether the file is from the local machine or HDFS
-	public isMainSourceFromLocal: boolean;
-
-	// Indicates the final path to be submitted within HDFS
-	public hdfsSubmitFilePath: string;
-
-	// Local-file-upload related paths: source; destinationFolder
-	public localFileSourcePath: string;
-	public hdfsFolderDestinationPath: string;
-
-	constructor(
-		private readonly _sqlClusterConnection: SqlClusterConnection,
-		private readonly _dialog: azdata.window.Dialog,
-		private readonly _appContext: AppContext) {
-
-		if (!this._sqlClusterConnection || !this._dialog || !this._appContext) {
-			throw new Error(localize('sparkJobSubmission.SparkJobSubmissionModelInitializeError',
-				"The parameters for SparkJobSubmissionModel are invalid"));
-		}
-
-		this._dialogService = new SparkJobSubmissionService();
-		this._guidForClusterFolder = utils.generateGuid();
-	}
-
-	public get connection(): SqlClusterConnection { return this._sqlClusterConnection; }
-	public get dialogService(): SparkJobSubmissionService { return this._dialogService; }
-	public get dialog(): azdata.window.Dialog { return this._dialog; }
-
-	public isJarFile(): boolean {
-		if (this.hdfsSubmitFilePath) {
-			return this.hdfsSubmitFilePath.toLowerCase().endsWith('jar');
-		}
-
-		return false;
-	}
-
-	public showDialogError(message: string): void {
-		let errorLevel = azdata.window.MessageLevel ? azdata.window.MessageLevel.Error : 0;
-		this._dialog.message = {
-			text: message,
-			level: errorLevel
-		};
-	}
-
-	public showDialogInfo(message: string): void {
-		let infoLevel = azdata.window.MessageLevel ? azdata.window.MessageLevel.Information : 2;
-		this._dialog.message = {
-			text: message,
-			level: infoLevel
-		};
-	}
-
-	public getSparkClusterUrl(): string {
-		if (this._sqlClusterConnection && this._sqlClusterConnection.host && this._sqlClusterConnection.port) {
-			return `https://${this._sqlClusterConnection.host}:${this._sqlClusterConnection.port}`;
-		}
-
-		// Safety check only; won't happen when the model is initialized correctly.
-		return '';
-	}
-
-	public async submitBatchJobByLivy(submissionArgs: SparkJobSubmissionInput): Promise<string> {
-		try {
-			if (!submissionArgs) {
-				return Promise.reject(localize('sparkJobSubmission.submissionArgsIsInvalid', "submissionArgs is invalid."));
-			}
-
-			submissionArgs.setSparkClusterInfo(this._sqlClusterConnection);
-			let livyBatchId = await this._dialogService.submitBatchJob(submissionArgs);
-			return livyBatchId;
-		} catch (error) {
-			return Promise.reject(error);
-		}
-	}
-
-	public async getApplicationID(submissionArgs: SparkJobSubmissionInput, livyBatchId: string, retryTime?: number): Promise<string> {
-		// TODO: decide whether to set the timeout to 15000 ms
-		try {
-			if (!submissionArgs) {
-				return Promise.reject(localize('sparkJobSubmission.submissionArgsIsInvalid', "submissionArgs is invalid."));
-			}
-
-			if (!utils.isValidNumber(livyBatchId)) {
-				return Promise.reject(new Error(localize('sparkJobSubmission.LivyBatchIdIsInvalid', "livyBatchId is invalid.")));
-			}
-
-			if (!retryTime) {
-				retryTime = constants.mssqlClusterLivyRetryTimesForCheckYarnApp;
-			}
-
-			submissionArgs.setSparkClusterInfo(this._sqlClusterConnection);
-			let response: LivyLogResponse = undefined;
-			let timeOutCount: number = 0;
-			do {
-				timeOutCount++;
-				await this.sleep(constants.mssqlClusterLivyTimeInMSForCheckYarnApp);
-				response = await this._dialogService.getYarnAppId(submissionArgs, livyBatchId);
-			} while (response.appId === '' && timeOutCount < retryTime);
-
-			if (response.appId === '') {
-				return Promise.reject(localize('sparkJobSubmission.GetApplicationIdTimeOut', "Getting the application ID timed out. {0}[Log] {1}", os.EOL, response.log));
-			} else {
-				return response.appId;
-			}
-		} catch (error) {
-			return Promise.reject(error);
-		}
-	}
")); - } - - let fileSource: IFileSource = await this._sqlClusterConnection.createHdfsFileSource(); - return await fileSource.exists(path); - } catch (error) { - return Promise.reject(error); - } - } - - public updateModelByLocalPath(localPath: string): void { - if (localPath) { - this.localFileSourcePath = localPath; - this.hdfsFolderDestinationPath = this.generateDestinationFolder(); - let fileName = fspath.basename(localPath); - this.hdfsSubmitFilePath = joinHdfsPath(this.hdfsFolderDestinationPath, fileName); - } else { - this.hdfsSubmitFilePath = ''; - } - } - - // Example path: /SparkSubmission/2018/08/21/b682a6c4-1954-401e-8542-9c573d69d9c0/default_artifact.jar - private generateDestinationFolder(): string { - let day = new Date(); - return `/SparkSubmission/${day.getUTCFullYear()}/${day.getUTCMonth() + 1}/${day.getUTCDate()}/${this._guidForClusterFolder}`; - } - - // Example: https://host:30443/gateway/default/yarn/cluster/app/application_1532646201938_0057 - public generateYarnUIUrl(submissionArgs: SparkJobSubmissionInput, appId: string): string { - return `https://${submissionArgs.host}:${submissionArgs.port}/gateway/default/yarn/cluster/app/${appId}`; - } - - // Example: https://host:30443/gateway/default/yarn/proxy/application_1532646201938_0411 - public generateSparkTrackingUIUrl(submissionArgs: SparkJobSubmissionInput, appId: string): string { - return `https://${submissionArgs.host}:${submissionArgs.port}/gateway/default/yarn/proxy/${appId}`; - } - - // Example: https://host:30443/gateway/default/sparkhistory/history/application_1532646201938_0057/1 - public generateSparkHistoryUIUrl(submissionArgs: SparkJobSubmissionInput, appId: string): string { - return `https://${submissionArgs.host}:${submissionArgs.port}/gateway/default/sparkhistory/history/${appId}/1`; - } - - private async sleep(ms: number): Promise<{}> { - // tslint:disable-next-line no-string-based-set-timeout - return new Promise(resolve => setTimeout(resolve, ms)); - } -} diff --git a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionService.ts b/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionService.ts deleted file mode 100644 index e6519f721c..0000000000 --- a/extensions/mssql/src/sparkFeature/dialog/sparkJobSubmission/sparkJobSubmissionService.ts +++ /dev/null @@ -1,240 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/
-
-import * as os from 'os';
-import * as nls from 'vscode-nls';
-const localize = nls.loadMessageBundle();
-import * as constants from '../../../constants';
-import { SqlClusterConnection } from '../../../objectExplorerNodeProvider/connection';
-import * as utils from '../../../utils';
-import * as auth from '../../../util/auth';
-import * as request from 'request-light';
-
-export class SparkJobSubmissionService {
-	public async submitBatchJob(submissionArgs: SparkJobSubmissionInput): Promise<string> {
-		let livyUrl: string = `https://${submissionArgs.host}:${submissionArgs.port}${submissionArgs.livyPath}/`;
-
-		// Get correct authentication headers
-		let headers = await this.getAuthenticationHeaders(submissionArgs);
-
-		let options: request.XHROptions = {
-			url: livyUrl,
-			type: 'POST',
-			strictSSL: !auth.getIgnoreSslVerificationConfigSetting(),
-			data: {
-				file: submissionArgs.config.sparkFile,
-				proxyUser: submissionArgs.user,
-				className: submissionArgs.config.mainClass,
-				name: submissionArgs.config.jobName
-			},
-			// authentication headers
-			headers: headers
-		};
-
-		// Now set the other parameters based on the user configuration - see
-		// https://livy.incubator.apache.org/docs/latest/rest-api.html for more detailed information
-
-		// Set arguments
-		const args = submissionArgs.config.arguments?.trim();
-		if (args) {
-			const argsList = args.split(' ');
-			if (argsList.length > 0) {
-				options.data['args'] = argsList;
-			}
-		}
-
-		// Set jar files
-		const jarFiles = submissionArgs.config.jarFiles?.trim();
-		if (jarFiles) {
-			const jarList = jarFiles.split(';');
-			if (jarList.length > 0) {
-				options.data['jars'] = jarList;
-			}
-		}
-
-		// Set py files
-		if (submissionArgs.config.pyFiles?.trim()) {
-			const pyList = submissionArgs.config.pyFiles.split(';');
-			if (pyList.length > 0) {
-				options.data['pyFiles'] = pyList;
-			}
-		}
-
-		// Set other files
-		const otherFiles = submissionArgs.config.otherFiles?.trim();
-		if (otherFiles) {
-			const otherList = otherFiles.split(';');
-			if (otherList.length > 0) {
-				options.data['files'] = otherList;
-			}
-		}
-
-		// Set driver memory
-		const driverMemory = submissionArgs.config.driverMemory?.trim();
-		if (driverMemory) {
-			options.data['driverMemory'] = driverMemory;
-		}
-
-		// Set driver cores
-		if (submissionArgs.config.driverCores) {
-			options.data['driverCores'] = submissionArgs.config.driverCores;
-		}
-
-		// Set executor memory
-		const executorMemory = submissionArgs.config.executorMemory?.trim();
-		if (executorMemory) {
-			options.data['executorMemory'] = executorMemory;
-		}
-
-		// Set executor cores
-		if (submissionArgs.config.executorCores) {
-			options.data['executorCores'] = submissionArgs.config.executorCores;
-		}
-
-		// Set executor count
-		if (submissionArgs.config.executorCount) {
-			options.data['numExecutors'] = submissionArgs.config.executorCount;
-		}
-
-		// Set queue name
-		if (submissionArgs.config.queueName) {
-			options.data['queue'] = submissionArgs.config.queueName;
-		}
-
-		// Set additional configuration values
-		const configurationValues = submissionArgs.config.configValues?.trim();
-		if (configurationValues) {
-			options.data['conf'] = configurationValues;
-		}
-
-		options.data = JSON.stringify(options.data);
-
-		// Note this is currently required to be called each time since request-light is overwriting
-		// the setting passed in through the options.
If/when that gets fixed this can be removed - request.configure(null, !auth.getIgnoreSslVerificationConfigSetting()); - - const response = JSON.parse((await request.xhr(options)).responseText); - if (response && utils.isValidNumber(response.id)) { - return response.id; - } - - throw new Error(localize('sparkJobSubmission.LivyNoBatchIdReturned', - "No Spark job batch id is returned from response.{0}[Error] {1}", os.EOL, JSON.stringify(response))); - } - - private async getAuthenticationHeaders(submissionArgs: SparkJobSubmissionInput) { - let headers = {}; - if (submissionArgs.isIntegratedAuth) { - let kerberosToken = await auth.authenticateKerberos(submissionArgs.host); - headers = { Authorization: `Negotiate ${kerberosToken}` }; - } - else { - headers = { Authorization: 'Basic ' + Buffer.from(submissionArgs.user + ':' + submissionArgs.password).toString('base64') }; - } - return headers; - } - - public async getYarnAppId(submissionArgs: SparkJobSubmissionInput, livyBatchId: string): Promise { - let livyUrl = `https://${submissionArgs.host}:${submissionArgs.port}${submissionArgs.livyPath}/${livyBatchId}/log`; - let headers = await this.getAuthenticationHeaders(submissionArgs); - - let options: request.XHROptions = { - url: livyUrl, - type: 'GET', - strictSSL: !auth.getIgnoreSslVerificationConfigSetting(), - // authentication headers - headers: headers - }; - - // Note this is currently required to be called each time since request-light is overwriting - // the setting passed in through the options. If/when that gets fixed this can be removed - request.configure(null, !auth.getIgnoreSslVerificationConfigSetting()); - - const response = JSON.parse((await request.xhr(options)).responseText); - if (response && response.log) { - return this.extractYarnAppIdFromLog(response.log); - } - - throw new Error(localize('sparkJobSubmission.LivyNoLogReturned', - "No log is returned within response.{0}[Error] {1}", os.EOL, JSON.stringify(response))); - } - - - private extractYarnAppIdFromLog(log: any): LivyLogResponse { - let logForPrint = log; - if (Array.isArray(log)) { - logForPrint = log.join(os.EOL); - } - - // eg: '18/08/23 11:02:50 INFO yarn.Client: Application report for application_1532646201938_0182 (state: ACCEPTED)' - for (let entry of log) { - if (entry.indexOf('Application report for') >= 0 && entry.indexOf('(state: ACCEPTED)') >= 0) { - let tokens = entry.split(' '); - for (let token of tokens) { - if (token.startsWith('application_')) { - return new LivyLogResponse(logForPrint, token); - } - } - } - } - - return new LivyLogResponse(logForPrint, ''); - } -} - -/** - * The configuration values for the spark job submission. See https://livy.incubator.apache.org/docs/latest/rest-api.html - * for more detailed information. 
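Editor's note: the token scan above can be expressed as a single regular expression over the joined log, and the surrounding retry loop is only a few lines. A sketch of both, assuming Livy returns `log` as a string array and that ids keep the `application_<clusterTs>_<seq>` shape shown in the example; `fetchLog` is a stand-in for `getYarnAppId`'s HTTP call.

import * as os from 'os';

// Pull the Yarn application id out of Livy's batch log, e.g. from
// '18/08/23 11:02:50 INFO yarn.Client: Application report for application_1532646201938_0182 (state: ACCEPTED)'.
function extractYarnAppId(log: string[]): { log: string; appId: string } {
	const joined = log.join(os.EOL);
	const match = joined.match(/Application report for (application_\d+_\d+) \(state: ACCEPTED\)/);
	return { log: joined, appId: match ? match[1] : '' };
}

// Poll the batch log until Yarn reports an application id, sleeping between tries.
async function pollForAppId(fetchLog: () => Promise<string[]>, retries: number, delayMs: number): Promise<string> {
	for (let attempt = 0; attempt < retries; attempt++) {
		await new Promise(resolve => setTimeout(resolve, delayMs));
		const { appId } = extractYarnAppId(await fetchLog());
		if (appId) { return appId; }
	}
	throw new Error('Timed out waiting for the Yarn application id');
}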
- */ -export interface SparkJobSubmissionConfig { - readonly jobName: string, - readonly sparkFile: string, - readonly mainClass: string, - readonly arguments?: string, - readonly jarFiles?: string, - readonly pyFiles?: string, - readonly otherFiles?: string, - readonly driverMemory?: string, - readonly driverCores?: number, - readonly executorMemory?: string, - readonly executorCores?: number, - readonly executorCount?: number, - readonly queueName?: string, - readonly configValues?: string -} - -export class SparkJobSubmissionInput { - public setSparkClusterInfo(sqlClusterConnection: SqlClusterConnection): void { - this._host = sqlClusterConnection.host; - this._port = sqlClusterConnection.port; - this._livyPath = constants.mssqlClusterLivySubmitPath; - this._user = sqlClusterConnection.user; - this._password = sqlClusterConnection.password; - this._isIntegratedAuth = sqlClusterConnection.isIntegratedAuth(); - } - - constructor( - public readonly config: SparkJobSubmissionConfig, - private _host?: string, - private _port?: number, - private _livyPath?: string, - private _user?: string, - private _password?: string, - private _isIntegratedAuth?: boolean) { } - - public get host(): string { return this._host; } - public get port(): number { return this._port; } - public get livyPath(): string { return this._livyPath; } - public get user(): string { return this._user; } - public get password(): string { return this._password; } - public get isIntegratedAuth(): boolean { return this._isIntegratedAuth; } -} - -export enum SparkFileSource { - HDFS = 'HDFS', - Local = 'Local' -} - -export class LivyLogResponse { - constructor(public log: string, public appId: string) { } -} diff --git a/extensions/mssql/src/sparkFeature/historyTask.ts b/extensions/mssql/src/sparkFeature/historyTask.ts deleted file mode 100644 index 0a0beee4e8..0000000000 --- a/extensions/mssql/src/sparkFeature/historyTask.ts +++ /dev/null @@ -1,43 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { AppContext } from '../appContext'; -import { getErrorMessage } from '../utils'; -import * as SqlClusterLookUp from '../sqlClusterLookUp'; -import * as loc from '../localizedConstants'; - -export class OpenSparkYarnHistoryTask { - constructor(private appContext: AppContext) { - } - - async execute(sqlConnProfile: azdata.IConnectionProfile, isSpark: boolean): Promise { - try { - let sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(sqlConnProfile, this.appContext); - if (!sqlClusterConnection) { - let name = isSpark ? 
'Spark' : 'Yarn'; - void vscode.window.showErrorMessage(loc.sparkConnectionRequired(name)); - return; - } - if (isSpark) { - void vscode.commands.executeCommand('vscode.open', vscode.Uri.parse(this.generateSparkHistoryUrl(sqlClusterConnection.host, sqlClusterConnection.port))); - } - else { - void vscode.commands.executeCommand('vscode.open', vscode.Uri.parse(this.generateYarnHistoryUrl(sqlClusterConnection.host, sqlClusterConnection.port))); - } - } catch (error) { - void vscode.window.showErrorMessage(getErrorMessage(error)); - } - } - - private generateSparkHistoryUrl(host: string, port: number): string { - return `https://${host}:${port}/gateway/default/sparkhistory/`; - } - - private generateYarnHistoryUrl(host: string, port: number): string { - return `https://${host}:${port}/gateway/default/yarn/cluster/apps`; - } -} diff --git a/extensions/mssql/src/sqlClusterLookUp.ts b/extensions/mssql/src/sqlClusterLookUp.ts deleted file mode 100644 index 6f7eeff69d..0000000000 --- a/extensions/mssql/src/sqlClusterLookUp.ts +++ /dev/null @@ -1,249 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as bdc from 'bdc'; -import * as vscode from 'vscode'; -import * as constants from './constants'; -import * as UUID from 'vscode-languageclient/lib/utils/uuid'; -import { AppContext } from './appContext'; -import { SqlClusterConnection } from './objectExplorerNodeProvider/connection'; -import { ICommandObjectExplorerContext } from './objectExplorerNodeProvider/command'; -import { getClusterEndpoints, getHostAndPortFromEndpoint } from './utils'; -import { MssqlObjectExplorerNodeProvider } from './objectExplorerNodeProvider/objectExplorerNodeProvider'; -import CodeAdapter from './prompts/adapter'; -import { IQuestion, QuestionTypes } from './prompts/question'; -import * as nls from 'vscode-nls'; -import { AuthType } from './util/auth'; -const localize = nls.loadMessageBundle(); - -export async function findSqlClusterConnection( - obj: ICommandObjectExplorerContext | azdata.IConnectionProfile, - appContext: AppContext): Promise { - - if (!obj || !appContext) { - console.error('SqlClusterLookup::findSqlClusterConnection - No context available'); - return undefined; - } - - let sqlConnProfile: azdata.IConnectionProfile; - if ('type' in obj && obj.type === constants.ObjectExplorerService - && 'explorerContext' in obj && obj.explorerContext && obj.explorerContext.connectionProfile) { - sqlConnProfile = obj.explorerContext.connectionProfile; - } else if ('options' in obj) { - sqlConnProfile = obj; - } - - let sqlClusterConnection: SqlClusterConnection = undefined; - if (sqlConnProfile) { - sqlClusterConnection = await findSqlClusterConnectionBySqlConnProfile(sqlConnProfile, appContext); - } else { - console.error('SqlClusterLookup::findSqlClusterConnection - No connection profile'); - } - return sqlClusterConnection; -} - -async function findSqlClusterConnectionBySqlConnProfile(sqlConnProfile: azdata.IConnectionProfile, appContext: AppContext): Promise { - if (!sqlConnProfile || !appContext) { - console.error('SqlClusterLookup::findSqlClusterConnectionBySqlConnProfile - No context available'); - return undefined; - } - - let sqlOeNodeProvider = 
appContext.getService(constants.ObjectExplorerService); - if (!sqlOeNodeProvider) { - console.error('SqlClusterLookup::findSqlClusterConnectionBySqlConnProfile - No OE Node Provider available'); - return undefined; - } - - let sqlClusterSession = sqlOeNodeProvider.findSqlClusterSessionBySqlConnProfile(sqlConnProfile); - if (!sqlClusterSession) { - console.error('SqlClusterLookup::findSqlClusterConnectionBySqlConnProfile - No SQL Cluster Session found'); - return undefined; - } - - return sqlClusterSession.getSqlClusterConnection(); -} - -export async function getSqlClusterConnectionParams( - obj: azdata.IConnectionProfile | azdata.connection.Connection | ICommandObjectExplorerContext, - appContext: AppContext): Promise { - - if (!obj) { return undefined; } - - let sqlClusterConnInfo: ConnectionParam = undefined; - if ('providerName' in obj) { - if (obj.providerName === constants.mssqlClusterProviderName) { - sqlClusterConnInfo = 'id' in obj ? connProfileToConnectionParam(obj) : connToConnectionParam(obj); - } else { - sqlClusterConnInfo = await createSqlClusterConnInfo(obj, appContext); - } - } else { - sqlClusterConnInfo = await createSqlClusterConnInfo(obj.explorerContext.connectionProfile, appContext); - } - - return sqlClusterConnInfo; -} - -async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile | azdata.connection.Connection, appContext: AppContext): Promise { - if (!sqlConnInfo) { return undefined; } - - let connectionId: string = 'id' in sqlConnInfo ? sqlConnInfo.id : sqlConnInfo.connectionId; - if (!connectionId) { return undefined; } - - let serverInfo = await azdata.connection.getServerInfo(connectionId); - if (!serverInfo || !serverInfo.options) { return undefined; } - - let endpoints: bdc.IEndpointModel[] = getClusterEndpoints(serverInfo); - if (!endpoints || endpoints.length === 0) { return undefined; } - - let credentials = await azdata.connection.getCredentials(connectionId); - if (!credentials) { return undefined; } - - let clusterConnInfo = { - providerName: constants.mssqlClusterProviderName, - connectionId: UUID.generateUuid(), - options: {} - }; - - // We need to populate some extra information here in order to be able to browse the HDFS nodes. - // First - if the auth type isn't integrated auth then we need to try and find the username to connect - // to the knox endpoint with. - // Next we need the knox endpoint - if we didn't get that from the SQL instance (because the user didn't have permissions - // to see the full DMV usually) then we need to connect to the controller to fetch the full list of endpoints and get it - // that way. 
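Editor's note: a condensed sketch of the endpoint fallback that comment describes, using the `bdc` typings already imported in this file; `resolveGatewayEndpoint` and `getController` are illustrative names, not the original helpers, and `'gateway'` stands in for `constants.hadoopEndpointNameGateway`.

import * as bdc from 'bdc';

// Prefer the gateway endpoint reported by the SQL instance's DMV; if the user
// couldn't see it there, fetch the full endpoint list from the controller instead.
async function resolveGatewayEndpoint(
	endpoints: bdc.IEndpointModel[],
	getController: () => Promise<bdc.IClusterController>
): Promise<bdc.IEndpointModel> {
	const isGateway = (ep: bdc.IEndpointModel) => ep.name.toLowerCase() === 'gateway';
	let gateway = endpoints.find(isGateway);
	if (!gateway) {
		const controller = await getController();
		gateway = (await controller.getEndPoints()).endPoints.find(isGateway);
	}
	if (!gateway) {
		throw new Error('Could not find Knox gateway endpoint');
	}
	return gateway;
}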
- let clusterController: bdc.IClusterController | undefined = undefined; - let authType = clusterConnInfo.options[constants.authenticationTypePropName] = sqlConnInfo.options[constants.authenticationTypePropName]; - const controllerEndpoint = endpoints.find(ep => ep.name.toLowerCase() === 'controller'); - if (authType && authType.toLowerCase() !== constants.integratedAuth) { - const usernameKey = `bdc.username::${connectionId}`; - const savedUsername = appContext.extensionContext.globalState.get(usernameKey); - const credentialProvider = await azdata.credentials.getProvider('mssql.bdc.password'); - const savedPassword = (await credentialProvider.readCredential(connectionId)).password; - // If we don't have a previously saved username/password then use the SQL connection credentials as a best guess, - // if those don't work then we'll prompt the user for the info - clusterConnInfo.options[constants.userPropName] = savedUsername ?? sqlConnInfo.options[constants.userPropName]; - clusterConnInfo.options[constants.passwordPropName] = savedPassword ?? credentials.password; - try { - clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo); - // We've successfully connected so now store the username/password for future connections - await appContext.extensionContext.globalState.update(usernameKey, clusterConnInfo.options[constants.userPropName]); - await credentialProvider.saveCredential(connectionId, clusterConnInfo.options[constants.passwordPropName]); - clusterConnInfo.options[constants.userPropName] = await clusterController.getKnoxUsername(clusterConnInfo.options[constants.userPropName]); - } catch (err) { - console.log(`Unexpected error getting Knox username for SQL Cluster connection: ${err}`); - throw err; - } - } - - let hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase()); - if (hadoopEndpointIndex < 0) { - clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo); - endpoints = (await clusterController.getEndPoints()).endPoints; - hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase()); - } - const hostAndIp = getHostAndPortFromEndpoint(endpoints[hadoopEndpointIndex].endpoint); - clusterConnInfo.options[constants.hostPropName] = hostAndIp.host; - // TODO should we default the port? Or just ignore later? - clusterConnInfo.options[constants.knoxPortPropName] = hostAndIp.port || constants.defaultKnoxPort; - clusterConnInfo = connToConnectionParam(clusterConnInfo); - - return clusterConnInfo; -} - -async function getClusterController(controllerEndpoint: string, connInfo: ConnectionParam): Promise { - const bdcApi = await vscode.extensions.getExtension(bdc.constants.extensionName).activate(); - const authType: bdc.AuthType = connInfo.options[constants.authenticationTypePropName].toLowerCase() === AuthType.Integrated ? 
'integrated' : 'basic'; - const controller = bdcApi.getClusterController( - controllerEndpoint, - authType, - connInfo.options[constants.userPropName], - connInfo.options[constants.passwordPropName]); - try { - // We just want to test the connection - so using getEndpoints since that is available to all users (not just admin) - await controller.getEndPoints(); - return controller; - } catch (err) { - // Initial username/password failed so prompt user for username password until either user - // cancels out or we successfully connect - console.log(`Error connecting to cluster controller: ${err}`); - let errorMessage = ''; - const prompter = new CodeAdapter(); - while (true) { - let username = await prompter.promptSingle({ - type: QuestionTypes.input, - name: 'inputPrompt', - message: localize('promptBDCUsername', "{0}Please provide the username to connect to the BDC Controller:", errorMessage), - default: connInfo.options[constants.userPropName] - }); - if (!username) { - console.log(`User cancelled out of username prompt for BDC Controller`); - break; - } - const password = await prompter.promptSingle({ - type: QuestionTypes.password, - name: 'passwordPrompt', - message: localize('promptBDCPassword', "Please provide the password to connect to the BDC Controller"), - default: '' - }); - if (!password) { - console.log(`User cancelled out of password prompt for BDC Controller`); - break; - } - const controller = bdcApi.getClusterController(controllerEndpoint, authType, username, password); - try { - // We just want to test the connection - so using getEndpoints since that is available to all users (not just admin) - await controller.getEndPoints(); - // Update our connection with the new info - connInfo.options[constants.userPropName] = username; - connInfo.options[constants.passwordPropName] = password; - return controller; - } catch (err) { - errorMessage = localize('bdcConnectError', "Error: {0}. ", err.message ?? 
err); - } - } - throw new Error(localize('usernameAndPasswordRequired', "Username and password are required")); - } - -} -function connProfileToConnectionParam(connectionProfile: azdata.IConnectionProfile): ConnectionParam { - let result = Object.assign(connectionProfile, { connectionId: connectionProfile.id }); - return result; -} - -function connToConnectionParam(connection: azdata.connection.Connection): ConnectionParam { - let connectionId = connection.connectionId; - let options = connection.options; - let result = Object.assign(connection, - { - serverName: `${options[constants.hostPropName]},${options[constants.knoxPortPropName]}`, - userName: options[constants.userPropName], - password: options[constants.passwordPropName], - id: connectionId, - authenticationType: options[constants.authenticationTypePropName] - } - ); - return result; -} - -class ConnectionParam implements azdata.connection.Connection, azdata.IConnectionProfile, azdata.ConnectionInfo { - public connectionName: string; - public serverName: string; - public databaseName: string; - public userName: string; - public password: string; - public authenticationType: string; - public savePassword: boolean; - public groupFullName: string; - public groupId: string; - public saveProfile: boolean; - public id: string; - public azureTenantId?: string; - public azureAccount?: string; - - public providerName: string; - public connectionId: string; - - public options: { [name: string]: any; }; -} diff --git a/extensions/mssql/src/typings/refs.d.ts b/extensions/mssql/src/typings/refs.d.ts index ef1b84487e..062ae88d5c 100644 --- a/extensions/mssql/src/typings/refs.d.ts +++ b/extensions/mssql/src/typings/refs.d.ts @@ -7,4 +7,3 @@ /// /// /// -/// diff --git a/extensions/mssql/src/util/auth.ts b/extensions/mssql/src/util/auth.ts deleted file mode 100644 index 0106eaf998..0000000000 --- a/extensions/mssql/src/util/auth.ts +++ /dev/null @@ -1,37 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as kerberos from '@microsoft/ads-kerberos'; -import * as vscode from 'vscode'; - -export enum AuthType { - Integrated = 'integrated', - Basic = 'basic' -} - -export async function authenticateKerberos(hostname: string): Promise { - const service = 'HTTP' + (process.platform === 'win32' ? 
'/' : '@') + hostname; - const mechOID = kerberos.GSS_MECH_OID_KRB5; - let client = await kerberos.initializeClient(service, { mechOID }); - let response = await client.step(''); - return response; -} - -const bdcConfigSectionName = 'bigDataCluster'; -const ignoreSslConfigName = 'ignoreSslVerification'; - -/** - * Retrieves the current setting for whether to ignore SSL verification errors - */ -export function getIgnoreSslVerificationConfigSetting(): boolean { - try { - const config = vscode.workspace.getConfiguration(bdcConfigSectionName); - return config.get(ignoreSslConfigName, true); - } catch (error) { - console.error(`Unexpected error retrieving ${bdcConfigSectionName}.${ignoreSslConfigName} setting : ${error}`); - } - return true; -} - diff --git a/extensions/mssql/src/utils.ts b/extensions/mssql/src/utils.ts index f4f5b5dcbe..570ec76618 100644 --- a/extensions/mssql/src/utils.ts +++ b/extensions/mssql/src/utils.ts @@ -5,12 +5,10 @@ import * as azdata from 'azdata'; import * as vscode from 'vscode'; -import * as bdc from 'bdc'; import * as path from 'path'; import * as crypto from 'crypto'; import * as os from 'os'; import * as findRemoveSync from 'find-remove'; -import * as constants from './constants'; import { promises as fs } from 'fs'; import { IConfig, ServerProvider } from '@microsoft/ads-service-downloader'; import { env } from 'process'; @@ -273,56 +271,6 @@ export function getUserHome(): string { return process.env.HOME || process.env.USERPROFILE; } -export function getClusterEndpoints(serverInfo: azdata.ServerInfo): bdc.IEndpointModel[] | undefined { - let endpoints: RawEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty]; - if (!endpoints || endpoints.length === 0) { return []; } - - return endpoints.map(e => { - // If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS - let endpoint = e.endpoint ? 
e.endpoint : `https://${e.ipAddress}:${e.port}`; - let updatedEndpoint: bdc.IEndpointModel = { - name: e.serviceName, - description: e.description, - endpoint: endpoint, - protocol: e.protocol - }; - return updatedEndpoint; - }); -} - -export async function isBigDataCluster(connectionId: string): Promise { - const serverInfo = await azdata.connection.getServerInfo(connectionId); - - return !!serverInfo?.options?.[constants.isBigDataClusterProperty]; -} - -export type HostAndIp = { host: string, port: string }; - -export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp { - let authority = vscode.Uri.parse(endpoint).authority; - let hostAndPortRegex = /^(.*)([,:](\d+))/g; - let match = hostAndPortRegex.exec(authority); - if (match) { - return { - host: match[1], - port: match[3] - }; - } - return { - host: authority, - port: undefined - }; -} - -interface RawEndpoint { - serviceName: string; - description?: string; - endpoint?: string; - protocol?: string; - ipAddress?: string; - port?: number; -} - export function isValidNumber(maybeNumber: any) { return maybeNumber !== undefined && maybeNumber !== null diff --git a/extensions/notebook/kernels/pysparkkernel/kernel.json b/extensions/notebook/kernels/pysparkkernel/kernel.json deleted file mode 100644 index 96a4fb076a..0000000000 --- a/extensions/notebook/kernels/pysparkkernel/kernel.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "argv": [ - "python", - "-m", - "sparkmagic.kernels.pysparkkernel.pysparkkernel", - "-f", - "{connection_file}" - ], - "display_name": "PySpark" -} \ No newline at end of file diff --git a/extensions/notebook/kernels/sparkkernel/kernel.json b/extensions/notebook/kernels/sparkkernel/kernel.json deleted file mode 100644 index c4f8006c06..0000000000 --- a/extensions/notebook/kernels/sparkkernel/kernel.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "argv": [ - "python", - "-m", - "sparkmagic.kernels.sparkkernel.sparkkernel", - "-f", - "{connection_file}" - ], - "display_name": "Spark | Scala" -} \ No newline at end of file diff --git a/extensions/notebook/kernels/sparkrkernel/kernel.json b/extensions/notebook/kernels/sparkrkernel/kernel.json deleted file mode 100644 index 9bb2d5829f..0000000000 --- a/extensions/notebook/kernels/sparkrkernel/kernel.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "argv": [ - "python", - "-m", - "sparkmagic.kernels.sparkrkernel.sparkrkernel", - "-f", - "{connection_file}" - ], - "display_name": "Spark | R" -} \ No newline at end of file diff --git a/extensions/notebook/package.json b/extensions/notebook/package.json index 4278311614..e2526ae134 100644 --- a/extensions/notebook/package.json +++ b/extensions/notebook/package.json @@ -94,10 +94,6 @@ } }, "commands": [ - { - "command": "notebook.command.analyzeNotebook", - "title": "%notebook.analyzeJupyterNotebook%" - }, { "command": "notebook.command.open", "title": "%notebook.command.open%" @@ -128,10 +124,6 @@ "title": "%notebook.command.addcell%", "icon": "resources/dark/touchbar_add_cell.png" }, - { - "command": "jupyter.cmd.analyzeNotebook", - "title": "%title.analyzeJupyterNotebook%" - }, { "command": "jupyter.task.newNotebook", "title": "%title.newJupyterNotebook%", @@ -334,10 +326,6 @@ ], "menus": { "commandPalette": [ - { - "command": "notebook.command.analyzeNotebook", - "when": "false" - }, { "command": "notebook.command.open" }, @@ -373,10 +361,6 @@ "command": "jupyter.cmd.newNotebook", "when": "false" }, - { - "command": "jupyter.cmd.analyzeNotebook", - "when": "false" - }, { "command": "jupyter.task.openNotebook", "when": "false" @@ 
-486,18 +470,6 @@ "group": "1_notebook@2" } ], - "objectExplorer/item/context": [ - { - "command": "notebook.command.analyzeNotebook", - "when": "nodeType=~/^mssqlCluster/ && nodeLabel=~/[^\\s]+(\\.(csv|tsv|txt))$/ && nodeType == mssqlCluster:file", - "group": "1notebook@1" - }, - { - "command": "jupyter.cmd.analyzeNotebook", - "when": "nodeType=~/^hdfs/ && nodeLabel=~/[^\\s]+(\\.(csv|tsv|txt))$/ && nodeType == hdfs:file", - "group": "1notebook@1" - } - ], "view/item/context": [ { "command": "notebook.command.trustBook", @@ -670,14 +642,6 @@ "sql" ] }, - { - "magic": "lang_r", - "language": "r", - "executionTarget": null, - "kernels": [ - "sql" - ] - }, { "magic": "lang_java", "language": "java", @@ -694,39 +658,6 @@ ".ipynb" ], "standardKernels": [ - { - "name": "pysparkkernel", - "displayName": "PySpark", - "connectionProviderIds": [ - "MSSQL" - ], - "blockedOnSAW": true, - "supportedLanguages": [ - "python" - ] - }, - { - "name": "sparkkernel", - "displayName": "Spark | Scala", - "connectionProviderIds": [ - "MSSQL" - ], - "supportedLanguages": [ - "scala" - ], - "blockedOnSAW": true - }, - { - "name": "sparkrkernel", - "displayName": "Spark | R", - "connectionProviderIds": [ - "MSSQL" - ], - "supportedLanguages": [ - "r" - ], - "blockedOnSAW": true - }, { "name": "python3", "displayName": "Python 3", diff --git a/extensions/notebook/src/common/constants.ts b/extensions/notebook/src/common/constants.ts index 9c5b6a2736..1430f189a6 100644 --- a/extensions/notebook/src/common/constants.ts +++ b/extensions/notebook/src/common/constants.ts @@ -36,7 +36,6 @@ export const jupyterNewNotebookTask = 'jupyter.task.newNotebook'; export const jupyterOpenNotebookTask = 'jupyter.task.openNotebook'; export const jupyterNewNotebookCommand = 'jupyter.cmd.newNotebook'; export const jupyterReinstallDependenciesCommand = 'jupyter.reinstallDependencies'; -export const jupyterAnalyzeCommand = 'jupyter.cmd.analyzeNotebook'; export const jupyterManagePackages = 'jupyter.cmd.managePackages'; export const jupyterConfigurePython = 'jupyter.cmd.configurePython'; export const localhostName = 'localhost'; @@ -45,9 +44,6 @@ export const PackageNotFoundError = localize('managePackages.packageNotFound', " export const ipykernelDisplayName = 'Python 3 (ipykernel)'; export const python3DisplayName = 'Python 3'; -export const pysparkDisplayName = 'PySpark'; -export const sparkScalaDisplayName = 'Spark | Scala'; -export const sparkRDisplayName = 'Spark | R'; export const powershellDisplayName = 'PowerShell'; export const allKernelsName = 'All Kernels'; @@ -84,15 +80,6 @@ export const pythonWindowsInstallUrl = 'https://go.microsoft.com/fwlink/?linkid= export const pythonMacInstallUrl = 'https://go.microsoft.com/fwlink/?linkid=2163337'; export const pythonLinuxInstallUrl = 'https://go.microsoft.com/fwlink/?linkid=2163336'; -export const KNOX_ENDPOINT_SERVER = 'host'; -export const KNOX_ENDPOINT_PORT = 'knoxport'; -export const KNOX_ENDPOINT_GATEWAY = 'gateway'; -export const CONTROLLER_ENDPOINT = 'controller'; -export const SQL_PROVIDER = 'MSSQL'; -export const USER = 'user'; -export const AUTHTYPE = 'authenticationType'; -export const INTEGRATED_AUTH = 'integrated'; - // The version of the notebook file format that we support export const NBFORMAT = 4; export const NBFORMAT_MINOR = 2; diff --git a/extensions/notebook/src/common/localizedConstants.ts b/extensions/notebook/src/common/localizedConstants.ts index 707d77f8ed..c4297f1d46 100644 --- a/extensions/notebook/src/common/localizedConstants.ts +++ 
b/extensions/notebook/src/common/localizedConstants.ts @@ -12,8 +12,6 @@ export const msgNo = localize('msgNo', "No"); // Jupyter Constants /////////////////////////////////////////////////////// export const msgSampleCodeDataFrame = localize('msgSampleCodeDataFrame', "This sample code loads the file into a data frame and shows the first 10 results."); -export const noBDCConnectionError = localize('noBDCConnectionError', "Spark kernels require a connection to a SQL Server Big Data Cluster master instance."); -export const providerNotValidError = localize('providerNotValidError', "Non-MSSQL providers are not supported for spark kernels."); // Book view-let constants export const allFiles = localize('allFiles', "All Files"); diff --git a/extensions/notebook/src/common/notebookUtils.ts b/extensions/notebook/src/common/notebookUtils.ts index d0c0e3e665..2b20934668 100644 --- a/extensions/notebook/src/common/notebookUtils.ts +++ b/extensions/notebook/src/common/notebookUtils.ts @@ -4,15 +4,12 @@ *--------------------------------------------------------------------------------------------*/ import * as azdata from 'azdata'; -import * as os from 'os'; import * as vscode from 'vscode'; import * as nls from 'vscode-nls'; import { getErrorMessage } from '../common/utils'; const localize = nls.loadMessageBundle(); -const JUPYTER_NOTEBOOK_PROVIDER = 'jupyter'; -const msgSampleCodeDataFrame = localize('msgSampleCodeDataFrame', "This sample code loads the file into a data frame and shows the first 10 results."); const noNotebookVisible = localize('noNotebookVisible', "No notebook editor is active"); export class NotebookUtils { @@ -96,31 +93,4 @@ export class NotebookUtils { public async toggleMarkdownStyle(style: string, showUI?: boolean, value?: string): Promise { return vscode.commands.executeCommand(style, showUI, value); } - - public async analyzeNotebook(oeContext?: azdata.ObjectExplorerContext): Promise { - let editor = await azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' }), { - connectionProfile: oeContext ? oeContext.connectionProfile : undefined, - providerId: JUPYTER_NOTEBOOK_PROVIDER, - preview: false, - defaultKernel: { - name: 'pysparkkernel', - display_name: 'PySpark', - language: 'python' - } - }); - if (oeContext && oeContext.nodeInfo && oeContext.nodeInfo.nodePath) { - // Get the file path after '/HDFS' - let hdfsPath: string = oeContext.nodeInfo.nodePath.substring(oeContext.nodeInfo.nodePath.indexOf('/HDFS') + '/HDFS'.length); - if (hdfsPath.length > 0) { - let analyzeCommand = '#' + msgSampleCodeDataFrame + os.EOL + 'df = (spark.read.option("inferSchema", "true")' - + os.EOL + '.option("header", "true")' + os.EOL + '.csv("{0}"))' + os.EOL + 'df.show(10)'; - await editor.edit(editBuilder => { - editBuilder.insertCell({ - cell_type: 'code', - source: analyzeCommand.replace('{0}', hdfsPath) - }, 0); - }); - } - } - } } diff --git a/extensions/notebook/src/common/utils.ts b/extensions/notebook/src/common/utils.ts index 1f5015f597..6cf9fd5b0c 100644 --- a/extensions/notebook/src/common/utils.ts +++ b/extensions/notebook/src/common/utils.ts @@ -3,28 +3,19 @@ * Licensed under the Source EULA. See License.txt in the project root for license information. 
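Editor's note: for reference, the starter cell the analyze command injected can be rebuilt in a few lines. A sketch that assumes the same cell layout; the leading comment stands in for the localized `msgSampleCodeDataFrame` string.

import * as os from 'os';

// Recreate the PySpark starter cell the analyze command inserted: load the
// selected HDFS file into a data frame and show the first 10 rows.
function buildAnalyzeCellSource(hdfsPath: string): string {
	return [
		'# This sample code loads the file into a data frame and shows the first 10 results.',
		'df = (spark.read.option("inferSchema", "true")',
		'.option("header", "true")',
		`.csv("${hdfsPath}"))`,
		'df.show(10)'
	].join(os.EOL);
}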
*--------------------------------------------------------------------------------------------*/ -import * as bdc from 'bdc'; import * as childProcess from 'child_process'; import * as fs from 'fs-extra'; import * as nls from 'vscode-nls'; import * as vscode from 'vscode'; import * as azdata from 'azdata'; import * as crypto from 'crypto'; -import { notebookConfigKey, pinnedBooksConfigKey, AUTHTYPE, INTEGRATED_AUTH, KNOX_ENDPOINT_PORT, KNOX_ENDPOINT_SERVER } from './constants'; +import { notebookConfigKey, pinnedBooksConfigKey } from './constants'; import { IPrompter, IQuestion, QuestionTypes } from '../prompts/question'; import { BookTreeItemFormat } from '../book/bookTreeItem'; import * as loc from './localizedConstants'; const localize = nls.loadMessageBundle(); -export function getKnoxUrl(host: string, port: string): string { - return `https://${host}:${port}/gateway`; -} - -export function getLivyUrl(serverName: string, port: string): string { - return this.getKnoxUrl(serverName, port) + '/default/livy/v1/'; -} - export async function ensureDir(dirPath: string, outputChannel?: vscode.OutputChannel): Promise { outputChannel?.appendLine(localize('ensureDirOutputMsg', "... Ensuring {0} exists", dirPath)); await fs.ensureDir(dirPath); @@ -109,15 +100,6 @@ export enum Platform { Others } -interface RawEndpoint { - serviceName: string; - description?: string; - endpoint?: string; - protocol?: string; - ipAddress?: string; - port?: number; -} - export function getOSPlatformId(): string { let platformId = undefined; switch (process.platform) { @@ -268,61 +250,6 @@ export function isPackageSupported(pythonVersion: string, packageVersionConstrai return supportedVersionFound; } -export function getClusterEndpoints(serverInfo: azdata.ServerInfo): bdc.IEndpointModel[] { - let endpoints: RawEndpoint[] = serverInfo.options['clusterEndpoints']; - if (!endpoints || endpoints.length === 0) { return []; } - - return endpoints.map(e => { - // If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS - let endpoint = e.endpoint ? 
e.endpoint : `https://${e.ipAddress}:${e.port}`; - let updatedEndpoint: bdc.IEndpointModel = { - name: e.serviceName, - description: e.description, - endpoint: endpoint, - protocol: e.protocol - }; - return updatedEndpoint; - }); -} - -export type HostAndIp = { host: string, port: string }; - -export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp { - let authority = vscode.Uri.parse(endpoint).authority; - let hostAndPortRegex = /^(.*)([,:](\d+))/g; - let match = hostAndPortRegex.exec(authority); - if (match) { - return { - host: match[1], - port: match[3] - }; - } - return { - host: authority, - port: undefined - }; -} - -export function isIntegratedAuth(connection: azdata.IConnectionProfile): boolean { - return connection.options[AUTHTYPE] && connection.options[AUTHTYPE].toLowerCase() === INTEGRATED_AUTH.toLowerCase(); -} - -export function isSparkKernel(kernelName: string): boolean { - return kernelName && kernelName.toLowerCase().indexOf('spark') > -1; -} - -export function setHostAndPort(delimeter: string, connection: azdata.IConnectionProfile): void { - let originalHost = connection.options[KNOX_ENDPOINT_SERVER]; - if (!originalHost) { - return; - } - let index = originalHost.indexOf(delimeter); - if (index > -1) { - connection.options[KNOX_ENDPOINT_SERVER] = originalHost.slice(0, index); - connection.options[KNOX_ENDPOINT_PORT] = originalHost.slice(index + 1); - } -} - export async function exists(path: string): Promise { try { await fs.access(path); @@ -332,22 +259,6 @@ export async function exists(path: string): Promise { } } -const bdcConfigSectionName = 'bigDataCluster'; -const ignoreSslConfigName = 'ignoreSslVerification'; - -/** - * Retrieves the current setting for whether to ignore SSL verification errors - */ -export function getIgnoreSslVerificationConfigSetting(): boolean { - try { - const config = vscode.workspace.getConfiguration(bdcConfigSectionName); - return config.get(ignoreSslConfigName, true); - } catch (error) { - console.error('Unexpected error retrieving ${bdcConfigSectionName}.${ignoreSslConfigName} setting : ', error); - } - return true; -} - export function debounce(delay: number): Function { return decorate((fn, key) => { const timerKey = `$debounce$${key}`; diff --git a/extensions/notebook/src/dialog/configurePython/pickPackagesPage.ts b/extensions/notebook/src/dialog/configurePython/pickPackagesPage.ts index d978173cb0..1dd2a1067e 100644 --- a/extensions/notebook/src/dialog/configurePython/pickPackagesPage.ts +++ b/extensions/notebook/src/dialog/configurePython/pickPackagesPage.ts @@ -7,7 +7,7 @@ import * as azdata from 'azdata'; import * as nls from 'vscode-nls'; import { BasePage } from './basePage'; import { JupyterServerInstallation } from '../../jupyter/jupyterServerInstallation'; -import { python3DisplayName, pysparkDisplayName, sparkScalaDisplayName, sparkRDisplayName, powershellDisplayName, allKernelsName } from '../../common/constants'; +import { python3DisplayName, powershellDisplayName, allKernelsName } from '../../common/constants'; import { getDropdownValue } from '../../common/utils'; const localize = nls.loadMessageBundle(); @@ -39,7 +39,7 @@ export class PickPackagesPage extends BasePage { value: this.model.kernelName }).component(); } else { - let dropdownValues = [python3DisplayName, pysparkDisplayName, sparkScalaDisplayName, sparkRDisplayName, powershellDisplayName, allKernelsName]; + let dropdownValues = [python3DisplayName, powershellDisplayName, allKernelsName]; this.kernelDropdown = 
this.view.modelBuilder.dropDown().withProps({ value: dropdownValues[0], values: dropdownValues, diff --git a/extensions/notebook/src/extension.ts b/extensions/notebook/src/extension.ts index a914a740e6..311c2bf96e 100644 --- a/extensions/notebook/src/extension.ts +++ b/extensions/notebook/src/extension.ts @@ -128,9 +128,6 @@ export async function activate(extensionContext: vscode.ExtensionContext): Promi extensionContext.subscriptions.push(vscode.commands.registerCommand('notebook.command.addtext', async () => { await appContext.notebookUtils.addCell('markdown'); })); - extensionContext.subscriptions.push(vscode.commands.registerCommand('notebook.command.analyzeNotebook', async (explorerContext: azdata.ObjectExplorerContext) => { - await appContext.notebookUtils.analyzeNotebook(explorerContext); - })); extensionContext.subscriptions.push(vscode.window.registerUriHandler(new NotebookUriHandler())); extensionContext.subscriptions.push(vscode.commands.registerCommand('books.command.openLocalizedBooks', async () => { diff --git a/extensions/notebook/src/integrationTest/notebookIntegration.test.ts b/extensions/notebook/src/integrationTest/notebookIntegration.test.ts index ee74c6ebaf..f34ff856b8 100644 --- a/extensions/notebook/src/integrationTest/notebookIntegration.test.ts +++ b/extensions/notebook/src/integrationTest/notebookIntegration.test.ts @@ -58,7 +58,7 @@ describe('Notebook Extension Python Installation', function () { console.log('Uninstalling existing pip dependencies'); let install = jupyterController.jupyterInstallation; let pythonExe = JupyterServerInstallation.getPythonExePath(pythonInstallDir); - let command = `"${pythonExe}" -m pip uninstall -y jupyter pandas sparkmagic`; + let command = `"${pythonExe}" -m pip uninstall -y jupyter`; await executeStreamedCommand(command, { env: install.execOptions.env }, install.outputChannel); console.log('Uninstalling existing pip dependencies is done'); @@ -84,15 +84,15 @@ describe('Notebook Extension Python Installation', function () { let testPkgVersion = '0.24.2'; let expectedPkg: PythonPkgDetails = { name: testPkg, version: testPkgVersion }; - await install.installPipPackages([{ name: testPkg, version: testPkgVersion}], false); + await install.installPipPackages([{ name: testPkg, version: testPkgVersion }], false); let packages = await install.getInstalledPipPackages(); should(packages).containEql(expectedPkg); - await install.uninstallPipPackages([{ name: testPkg, version: testPkgVersion}]); + await install.uninstallPipPackages([{ name: testPkg, version: testPkgVersion }]); packages = await install.getInstalledPipPackages(); should(packages).not.containEql(expectedPkg); - await install.installPipPackages([{ name: testPkg, version: testPkgVersion}], false); + await install.installPipPackages([{ name: testPkg, version: testPkgVersion }], false); packages = await install.getInstalledPipPackages(); should(packages).containEql(expectedPkg); }); diff --git a/extensions/notebook/src/jupyter/jupyterController.ts b/extensions/notebook/src/jupyter/jupyterController.ts index 5995421c82..237b9f2176 100644 --- a/extensions/notebook/src/jupyter/jupyterController.ts +++ b/extensions/notebook/src/jupyter/jupyterController.ts @@ -6,12 +6,10 @@ import * as path from 'path'; import * as azdata from 'azdata'; import * as vscode from 'vscode'; -import * as os from 'os'; import * as nls from 'vscode-nls'; const localize = nls.loadMessageBundle(); import * as constants from '../common/constants'; -import * as localizedConstants from 
'../common/localizedConstants'; import { JupyterServerInstallation } from './jupyterServerInstallation'; import * as utils from '../common/utils'; import { IPrompter, IQuestion, QuestionTypes } from '../prompts/question'; @@ -66,9 +64,6 @@ export class JupyterController { vscode.commands.registerCommand(constants.jupyterNewNotebookCommand, (explorerContext: azdata.ObjectExplorerContext) => { return this.saveProfileAndCreateNotebook(explorerContext ? explorerContext.connectionProfile : undefined); }); - vscode.commands.registerCommand(constants.jupyterAnalyzeCommand, (explorerContext: azdata.ObjectExplorerContext) => { - return this.saveProfileAndAnalyzeNotebook(explorerContext); - }); vscode.commands.registerCommand(constants.jupyterReinstallDependenciesCommand, () => { return this.handleDependenciesReinstallation(); }); vscode.commands.registerCommand(constants.jupyterManagePackages, async (args) => { return this.doManagePackages(args); }); @@ -93,11 +88,7 @@ export class JupyterController { } private saveProfileAndCreateNotebook(profile: azdata.IConnectionProfile): Promise { - return this.handleNewNotebookTask(undefined, profile); - } - - private saveProfileAndAnalyzeNotebook(oeContext: azdata.ObjectExplorerContext): Promise { - return this.handleNewNotebookTask(oeContext, oeContext.connectionProfile); + return this.handleNewNotebookTask(profile); } // EVENT HANDLERS ////////////////////////////////////////////////////// @@ -130,34 +121,17 @@ export class JupyterController { } } - private async handleNewNotebookTask(oeContext?: azdata.ObjectExplorerContext, profile?: azdata.IConnectionProfile): Promise { - let editor = await azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' }), { + private async handleNewNotebookTask(profile?: azdata.IConnectionProfile): Promise { + await azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' }), { connectionProfile: profile, providerId: constants.jupyterNotebookProviderId, preview: false, defaultKernel: { - name: 'pysparkkernel', - display_name: 'PySpark', + name: 'python3', + display_name: 'Python 3', language: 'python' } }); - if (oeContext && oeContext.nodeInfo && oeContext.nodeInfo.nodePath) { - // Get the file path after '/HDFS' - let hdfsPath: string = oeContext.nodeInfo.nodePath.substring(oeContext.nodeInfo.nodePath.indexOf('/HDFS') + '/HDFS'.length); - if (hdfsPath.length > 0) { - let analyzeCommand = '#' + localizedConstants.msgSampleCodeDataFrame + os.EOL + 'df = (spark.read.option(\"inferSchema\", \"true\")' - + os.EOL + '.option(\"header\", \"true\")' + os.EOL + '.csv(\'{0}\'))' + os.EOL + 'df.show(10)'; - // TODO re-enable insert into document once APIs are finalized. - // editor.document.cells[0].source = [analyzeCommand.replace('{0}', hdfsPath)]; - await editor.edit(editBuilder => { - editBuilder.replace(0, { - cell_type: 'code', - source: analyzeCommand.replace('{0}', hdfsPath) - }); - }); - - } - } } private async handleDependenciesReinstallation(): Promise { diff --git a/extensions/notebook/src/jupyter/jupyterKernel.ts b/extensions/notebook/src/jupyter/jupyterKernel.ts index 4c0df7ba06..0fd1814f98 100644 --- a/extensions/notebook/src/jupyter/jupyterKernel.ts +++ b/extensions/notebook/src/jupyter/jupyterKernel.ts @@ -60,12 +60,6 @@ export class JupyterKernel implements nb.IKernel { return true; } - public get requiresConnection(): boolean { - // TODO would be good to have a smarter way to do this. 
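Editor's note: the replacement flow is now a single `showNotebookDocument` call with Python 3 as the default kernel. A sketch of that call, assuming `'jupyter'` is the value behind `constants.jupyterNotebookProviderId`.

import * as azdata from 'azdata';
import * as vscode from 'vscode';

// Open an untitled notebook against the Jupyter provider with Python 3 as the
// default kernel, optionally attaching the profile the command was invoked with.
async function openNewPythonNotebook(profile?: azdata.IConnectionProfile): Promise<azdata.nb.NotebookEditor> {
	return azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' }), {
		connectionProfile: profile,
		providerId: 'jupyter',
		preview: false,
		defaultKernel: { name: 'python3', display_name: 'Python 3', language: 'python' }
	});
}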
- // for now only Spark kernels need a connection - return !!(this.kernelImpl.name && this.kernelImpl.name.toLowerCase().indexOf('spark') > -1); - } - public get isReady(): boolean { return this.kernelImpl.isReady; } diff --git a/extensions/notebook/src/jupyter/jupyterServerInstallation.ts b/extensions/notebook/src/jupyter/jupyterServerInstallation.ts index 5cb278b1ca..e0a72fc1bf 100644 --- a/extensions/notebook/src/jupyter/jupyterServerInstallation.ts +++ b/extensions/notebook/src/jupyter/jupyterServerInstallation.ts @@ -88,22 +88,6 @@ export const requiredPowershellPkg: PythonPkgDetails = { version: '0.1.4' }; -export const requiredSparkPackages: PythonPkgDetails[] = [ - requiredJupyterPkg, - { - name: 'cryptography', - version: '3.2.1', - installExactVersion: true - }, - { - name: 'sparkmagic', - version: '0.12.9' - }, { - name: 'pandas', - version: '0.24.2' - } -]; - export class JupyterServerInstallation implements IJupyterServerInstallation { public extensionPath: string; public pythonBinPath: string; @@ -162,11 +146,8 @@ export class JupyterServerInstallation implements IJupyterServerInstallation { this._requiredKernelPackages.set(constants.ipykernelDisplayName, [requiredJupyterPkg]); this._requiredKernelPackages.set(constants.python3DisplayName, [requiredJupyterPkg]); this._requiredKernelPackages.set(constants.powershellDisplayName, [requiredJupyterPkg, requiredPowershellPkg]); - this._requiredKernelPackages.set(constants.pysparkDisplayName, requiredSparkPackages); - this._requiredKernelPackages.set(constants.sparkScalaDisplayName, requiredSparkPackages); - this._requiredKernelPackages.set(constants.sparkRDisplayName, requiredSparkPackages); - let allPackages = requiredSparkPackages.concat(requiredPowershellPkg); + let allPackages = [requiredJupyterPkg, requiredPowershellPkg]; this._requiredKernelPackages.set(constants.allKernelsName, allPackages); this._requiredPackagesSet = new Set(); diff --git a/extensions/notebook/src/jupyter/jupyterSessionManager.ts b/extensions/notebook/src/jupyter/jupyterSessionManager.ts index 3010ade62d..4f96883f58 100644 --- a/extensions/notebook/src/jupyter/jupyterSessionManager.ts +++ b/extensions/notebook/src/jupyter/jupyterSessionManager.ts @@ -3,61 +3,16 @@ * Licensed under the Source EULA. See License.txt in the project root for license information. 
*--------------------------------------------------------------------------------------------*/ -import { nb, ServerInfo, connection, IConnectionProfile, credentials } from 'azdata'; +import { nb, IConnectionProfile } from 'azdata'; import { Session, Kernel } from '@jupyterlab/services'; -import * as fs from 'fs-extra'; import * as nls from 'vscode-nls'; import * as vscode from 'vscode'; import * as path from 'path'; -import * as utils from '../common/utils'; const localize = nls.loadMessageBundle(); import { JupyterKernel } from './jupyterKernel'; import { Deferred } from '../common/promise'; import { JupyterServerInstallation } from './jupyterServerInstallation'; -import * as bdc from 'bdc'; -import { noBDCConnectionError, providerNotValidError } from '../common/localizedConstants'; -import { SQL_PROVIDER, CONTROLLER_ENDPOINT, KNOX_ENDPOINT_GATEWAY, KNOX_ENDPOINT_SERVER, KNOX_ENDPOINT_PORT } from '../common/constants'; -import CodeAdapter from '../prompts/adapter'; -import { IQuestion, QuestionTypes } from '../prompts/question'; -import { ExtensionContextHelper } from '../common/extensionContextHelper'; -import Logger from '../common/logger'; - -const configBase = { - 'kernel_python_credentials': { - 'url': '' - }, - 'kernel_scala_credentials': { - 'url': '' - }, - 'kernel_r_credentials': { - 'url': '' - }, - 'livy_session_startup_timeout_seconds': 100, - 'logging_config': { - 'version': 1, - 'formatters': { - 'magicsFormatter': { - 'format': '%(asctime)s\t%(levelname)s\t%(message)s', - 'datefmt': '' - } - }, - 'handlers': { - 'magicsHandler': { - 'class': 'hdijupyterutils.filehandler.MagicsFileHandler', - 'formatter': 'magicsFormatter', - 'home_path': '' - } - }, - 'loggers': { - 'magicsLogger': { - 'handlers': ['magicsHandler'], - 'level': 'DEBUG', - 'propagate': 0 - } - } - } -}; export class JupyterSessionManager implements nb.SessionManager, vscode.Disposable { private _ready: Deferred; @@ -114,10 +69,6 @@ export class JupyterSessionManager implements nb.SessionManager, vscode.Disposab return kernel; }); - // For now, need to remove PySpark3, as it's been deprecated - // May want to have a formalized deprecated kernels mechanism in the future - kernels = kernels.filter(k => k.name !== 'pyspark3kernel'); - let allKernels: nb.IAllKernels = { defaultKernel: specs.default, kernels: kernels @@ -263,117 +214,12 @@ export class JupyterSession implements nb.ISession { }); } - public async configureKernel(): Promise { - let sparkmagicConfDir = path.join(utils.getUserHome(), '.sparkmagic'); - await utils.ensureDir(sparkmagicConfDir); - - // Default to localhost in config file. 
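Editor's note: what the deleted `configureKernel` did, in miniature: ensure `~/.sparkmagic` exists, point every kernel family at one credential block, and write `config.json`. A sketch under those assumptions, trimmed to the fields shown in `configBase`; the logging section is omitted for brevity.

import * as fs from 'fs-extra';
import * as os from 'os';
import * as path from 'path';

// Minimal stand-in for the deleted sparkmagic bootstrap: all three kernel
// families share one credential block, written next to the user's home folder.
async function writeSparkMagicConfig(livyUrl: string = 'http://localhost:8088'): Promise<string> {
	const confDir = path.join(os.homedir(), '.sparkmagic');
	await fs.ensureDir(confDir);
	const creds = { url: livyUrl };
	const config = {
		kernel_python_credentials: creds,
		kernel_scala_credentials: creds,
		kernel_r_credentials: creds,
		livy_session_startup_timeout_seconds: 100
	};
	const configPath = path.join(confDir, 'config.json');
	await fs.writeFile(configPath, JSON.stringify(config));
	return configPath;
}

In the original, `updateConfig` also copied the `ignoreSslVerification` setting into the file's `ignore_ssl_errors` field before writing.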
- let creds: ICredentials = { - 'url': 'http://localhost:8088' - }; - - let config: ISparkMagicConfig = Object.assign({}, configBase); - this.updateConfig(config, creds, sparkmagicConfDir); - - let configFilePath = path.join(sparkmagicConfDir, 'config.json'); - await fs.writeFile(configFilePath, JSON.stringify(config)); + configureKernel(kernelInfo: nb.IKernelSpec): Thenable<void> { + return Promise.resolve(); } - public async configureConnection(connectionProfile: IConnectionProfile): Promise<void> { - if (connectionProfile && connectionProfile.providerName && utils.isSparkKernel(this.sessionImpl.kernel.name)) { - Logger.log(`Configuring Spark connection`); - // %_do_not_call_change_endpoint is a SparkMagic command that lets users change endpoint options, - // such as user/profile/host name/auth type - - let knoxUsername = connectionProfile.userName || 'root'; - let knoxPassword: string = ''; - - //Update server info with bigdata endpoint - Unified Connection - if (connectionProfile.providerName === SQL_PROVIDER) { - const serverInfo: ServerInfo = await connection.getServerInfo(connectionProfile.id); - if (!serverInfo?.options['isBigDataCluster']) { - throw new Error(noBDCConnectionError); - } - const endpoints = utils.getClusterEndpoints(serverInfo); - const controllerEndpoint = endpoints.find(ep => ep.name.toLowerCase() === CONTROLLER_ENDPOINT); - - Logger.log(`Found controller endpoint ${controllerEndpoint.endpoint}`); - // root is the default username for pre-CU5 instances, so while we prefer to use the connection username - // as a default now we'll still fall back to root if it's empty for some reason. (but the calls below should - // get the actual correct value regardless) - let clusterController: bdc.IClusterController | undefined = undefined; - if (!utils.isIntegratedAuth(connectionProfile)) { - // See if the controller creds have been saved already, otherwise fall back to using - // SQL creds as a default - const credentialProvider = await credentials.getProvider('notebook.bdc.password'); - const usernameKey = `notebook.bdc.username::${connectionProfile.id}`; - const savedUsername = ExtensionContextHelper.extensionContext.globalState.get<string>(usernameKey) || connectionProfile.userName; - const connectionCreds = await connection.getCredentials(connectionProfile.id); - const savedPassword = (await credentialProvider.readCredential(connectionProfile.id)).password || connectionCreds.password; - clusterController = await getClusterController(controllerEndpoint.endpoint, 'basic', savedUsername, savedPassword); - // Now that we know that the username/password are valid store them for use later on with the same connection - await credentialProvider.saveCredential(connectionProfile.id, clusterController.password); - await ExtensionContextHelper.extensionContext.globalState.update(usernameKey, clusterController.username); - knoxPassword = clusterController.password; - try { - knoxUsername = await clusterController.getKnoxUsername(clusterController.username); - } catch (err) { - knoxUsername = clusterController.username; - console.log(`Unexpected error getting Knox username for Spark kernel: ${err}`); - } - } else { - clusterController = await getClusterController(controllerEndpoint.endpoint, 'integrated'); - - } - - let gatewayEndpoint: bdc.IEndpointModel = endpoints?.find(ep => ep.name.toLowerCase() === KNOX_ENDPOINT_GATEWAY); - if (!gatewayEndpoint) { - Logger.log(`Querying controller for knox gateway endpoint`); - // User doesn't have permission to see the gateway endpoint from the DMV so we need to 
query the controller instead - const allEndpoints = (await clusterController.getEndPoints()).endPoints; - gatewayEndpoint = allEndpoints?.find(ep => ep.name.toLowerCase() === KNOX_ENDPOINT_GATEWAY); - if (!gatewayEndpoint) { - throw new Error(localize('notebook.couldNotFindKnoxGateway', "Could not find Knox gateway endpoint")); - } - } - Logger.log(`Got Knox gateway ${gatewayEndpoint.endpoint}`); - let gatewayHostAndPort = utils.getHostAndPortFromEndpoint(gatewayEndpoint.endpoint); - Logger.log(`Parsed knox host and port ${JSON.stringify(gatewayHostAndPort)}`); - connectionProfile.options[KNOX_ENDPOINT_SERVER] = gatewayHostAndPort.host; - connectionProfile.options[KNOX_ENDPOINT_PORT] = gatewayHostAndPort.port; - - } - else { - throw new Error(providerNotValidError); - } - utils.setHostAndPort(':', connectionProfile); - utils.setHostAndPort(',', connectionProfile); - - let server = vscode.Uri.parse(utils.getLivyUrl(connectionProfile.options[KNOX_ENDPOINT_SERVER], connectionProfile.options[KNOX_ENDPOINT_PORT])).toString(); - let doNotCallChangeEndpointParams: string; - let doNotCallChangeEndpointLogMessage: string; - if (utils.isIntegratedAuth(connectionProfile)) { - doNotCallChangeEndpointParams = `%_do_not_call_change_endpoint --server=${server} --auth=Kerberos`; - doNotCallChangeEndpointLogMessage = doNotCallChangeEndpointParams; - } else { - doNotCallChangeEndpointParams = `%_do_not_call_change_endpoint --username=${knoxUsername} --server=${server} --auth=Basic_Access`; - doNotCallChangeEndpointLogMessage = doNotCallChangeEndpointParams + ` --password=${'*'.repeat(knoxPassword.length)}`; - doNotCallChangeEndpointParams += ` --password=${knoxPassword}`; - } - Logger.log(`Change endpoint command '${doNotCallChangeEndpointLogMessage}'`); - let future = this.sessionImpl.kernel.requestExecute({ - code: doNotCallChangeEndpointParams - }, true); - await future.done; - } - } - - private updateConfig(config: ISparkMagicConfig, creds: ICredentials, homePath: string): void { - config.kernel_python_credentials = creds; - config.kernel_scala_credentials = creds; - config.kernel_r_credentials = creds; - config.logging_config.handlers.magicsHandler.home_path = homePath; - config.ignore_ssl_errors = utils.getIgnoreSslVerificationConfigSetting(); + configureConnection(connection: IConnectionProfile): Thenable<void> { + return Promise.resolve(); } private async setEnvironmentVars(skip: boolean = false): Promise<void> { @@ -404,76 +250,3 @@ export class JupyterSession implements nb.ISession { }); } } - -async function getClusterController(controllerEndpoint: string, authType: bdc.AuthType, username?: string, password?: string): Promise<bdc.IClusterController> { - Logger.log(`Getting cluster controller ${controllerEndpoint}. Auth=${authType} Username=${username} password=${'*'.repeat(password?.length ?? 
0)}`); - const bdcApi = await vscode.extensions.getExtension(bdc.constants.extensionName).activate(); - const controller = bdcApi.getClusterController( - controllerEndpoint, - authType, - username, - password); - try { - Logger.log(`Fetching endpoints for ${controllerEndpoint} to test connection...`); - // We just want to test the connection - so using getEndpoints since that is available to all users (not just admin) - await controller.getEndPoints(); - return controller; - } catch (err) { - // Initial username/password failed so prompt user for username password until either user - // cancels out or we successfully connect - console.log(`Error connecting to cluster controller: ${err}`); - let errorMessage = ''; - const prompter = new CodeAdapter(); - while (true) { - const newUsername = await prompter.promptSingle<string>(<IQuestion>{ - type: QuestionTypes.input, - name: 'inputPrompt', - message: localize('promptBDCUsername', "{0}Please provide the username to connect to the BDC Controller:", errorMessage), - default: username - }); - if (!newUsername) { - console.log(`User cancelled out of username prompt for BDC Controller`); - break; - } - const newPassword = await prompter.promptSingle<string>(<IQuestion>{ - type: QuestionTypes.password, - name: 'passwordPrompt', - message: localize('promptBDCPassword', "Please provide the password to connect to the BDC Controller"), - default: '' - }); - if (!newPassword) { - console.log(`User cancelled out of password prompt for BDC Controller`); - break; - } - const controller = bdcApi.getClusterController(controllerEndpoint, authType, newUsername, newPassword); - try { - // We just want to test the connection - so using getEndpoints since that is available to all users (not just admin) - await controller.getEndPoints(); - return controller; - } catch (err) { - errorMessage = localize('bdcConnectError', "Error: {0}. ", err.message ?? 
err); - } - } - throw new Error(localize('clusterControllerConnectionRequired', "A connection to the cluster controller is required to run Spark jobs")); - } -} - -interface ICredentials { - 'url': string; -} - -interface ISparkMagicConfig { - kernel_python_credentials: ICredentials; - kernel_scala_credentials: ICredentials; - kernel_r_credentials: ICredentials; - ignore_ssl_errors?: boolean; - logging_config: { - handlers: { - magicsHandler: { - home_path: string; - class?: string; - formatter?: string - } - } - }; -} diff --git a/extensions/notebook/src/test/common/notebookUtils.test.ts b/extensions/notebook/src/test/common/notebookUtils.test.ts index 95345b6838..3fbeab7027 100644 --- a/extensions/notebook/src/test/common/notebookUtils.test.ts +++ b/extensions/notebook/src/test/common/notebookUtils.test.ts @@ -203,39 +203,4 @@ describe('notebookUtils Tests', function (): void { should(notebookEditor.document.cells[0].contents.cell_type).equal(CellTypes.Markdown); }); }); - - describe('analyzeNotebook', function () { - it('creates cell when oeContext exists', async function (): Promise<void> { - await azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' })); - const notebookEditor = azdata.nb.activeNotebookEditor; - sinon.replaceGetter(azdata.nb, 'activeNotebookEditor', () => notebookEditor); - sinon.stub(azdata.nb, 'showNotebookDocument').returns(Promise.resolve(notebookEditor)); - const oeContext: azdata.ObjectExplorerContext = { - connectionProfile: undefined, - isConnectionNode: true, - nodeInfo: { - nodePath: 'path/HDFS/path2', - errorMessage: undefined, - isLeaf: false, - label: 'fakeLabel', - metadata: undefined, - nodeStatus: undefined, - nodeSubType: undefined, - nodeType: undefined - } - }; - await notebookUtils.analyzeNotebook(oeContext); - should(notebookEditor.document.cells.length).equal(1, 'One cell should exist'); - should(notebookEditor.document.cells[0].contents.cell_type).equal(CellTypes.Code, 'Cell was created with incorrect type'); - }); - - it('does not create new cell when oeContext does not exist', async function (): Promise<void> { - await azdata.nb.showNotebookDocument(vscode.Uri.from({ scheme: 'untitled' })); - const notebookEditor = azdata.nb.activeNotebookEditor; - sinon.replaceGetter(azdata.nb, 'activeNotebookEditor', () => notebookEditor); - sinon.stub(azdata.nb, 'showNotebookDocument').returns(Promise.resolve(notebookEditor)); - await notebookUtils.analyzeNotebook(); - should(notebookEditor.document.cells.length).equal(0, 'No cells should exist'); - }); - }); }); diff --git a/extensions/notebook/src/test/common/utils.test.ts b/extensions/notebook/src/test/common/utils.test.ts index 9dea386a72..9b5258e808 100644 --- a/extensions/notebook/src/test/common/utils.test.ts +++ b/extensions/notebook/src/test/common/utils.test.ts @@ -10,23 +10,9 @@ import * as os from 'os'; import * as path from 'path'; import * as utils from '../../common/utils'; import { MockOutputChannel } from './stubs'; -import * as azdata from 'azdata'; import { sleep } from './testUtils'; describe('Utils Tests', function () { - - it('getKnoxUrl', () => { - const host = '127.0.0.1'; - const port = '8080'; - should(utils.getKnoxUrl(host, port)).endWith('/gateway'); - }); - - it('getLivyUrl', () => { - const host = '127.0.0.1'; - const port = '8080'; - should(utils.getLivyUrl(host, port)).endWith('/gateway/default/livy/v1/'); - }); - it('ensureDir', async () => { const dirPath = path.join(os.tmpdir(), uuid.v4()); await should(fs.stat(dirPath)).be.rejected(); @@ -273,63 +259,6 @@ 
describe('Utils Tests', function () { }); }); - describe('getClusterEndpoints', () => { - const baseServerInfo: azdata.ServerInfo = { - serverMajorVersion: -1, - serverMinorVersion: -1, - serverReleaseVersion: -1, - engineEditionId: -1, - serverVersion: '', - serverLevel: '', - serverEdition: '', - isCloud: false, - azureVersion: -1, - osVersion: '', - options: {}, - cpuCount: -1, - physicalMemoryInMb: -1 - }; - it('empty endpoints does not error', () => { - const serverInfo = Object.assign({}, baseServerInfo); - serverInfo.options['clusterEndpoints'] = []; - should(utils.getClusterEndpoints(serverInfo).length).equal(0); - }); - - it('endpoints without endpoint field are created successfully', () => { - const serverInfo = Object.assign({}, baseServerInfo); - const ipAddress = 'localhost'; - const port = '123'; - serverInfo.options['clusterEndpoints'] = [{ ipAddress: ipAddress, port: port }]; - const endpoints = utils.getClusterEndpoints(serverInfo); - should(endpoints.length).equal(1); - should(endpoints[0].endpoint).equal('https://localhost:123'); - }); - - it('endpoints with endpoint field are created successfully', () => { - const endpoint = 'https://myActualEndpoint:8080'; - const serverInfo = Object.assign({}, baseServerInfo); - serverInfo.options['clusterEndpoints'] = [{ endpoint: endpoint, ipAddress: 'localhost', port: '123' }]; - const endpoints = utils.getClusterEndpoints(serverInfo); - should(endpoints.length).equal(1); - should(endpoints[0].endpoint).equal(endpoint); - }); - }); - - describe('getHostAndPortFromEndpoint', () => { - it('valid endpoint is parsed correctly', () => { - const host = 'localhost'; - const port = '123'; - const hostAndIp = utils.getHostAndPortFromEndpoint(`https://${host}:${port}`); - should(hostAndIp).deepEqual({ host: host, port: port }); - }); - - it('invalid endpoint is returned as is', () => { - const host = 'localhost'; - const hostAndIp = utils.getHostAndPortFromEndpoint(`https://${host}`); - should(hostAndIp).deepEqual({ host: host, port: undefined }); - }); - }); - describe('exists', () => { it('runs as expected', async () => { const filename = path.join(os.tmpdir(), `NotebookUtilsTest_${uuid.v4()}`); @@ -345,12 +274,6 @@ describe('Utils Tests', function () { }); }); - describe('getIgnoreSslVerificationConfigSetting', () => { - it('runs as expected', async () => { - should(utils.getIgnoreSslVerificationConfigSetting()).be.true(); - }); - }); - describe('debounce', () => { class DebounceTest { public fnCalled = 0; diff --git a/extensions/notebook/src/test/model/sessionManager.test.ts b/extensions/notebook/src/test/model/sessionManager.test.ts index 6fbc45eef3..629ccdb9dd 100644 --- a/extensions/notebook/src/test/model/sessionManager.test.ts +++ b/extensions/notebook/src/test/model/sessionManager.test.ts @@ -5,42 +5,18 @@ import * as should from 'should'; import * as TypeMoq from 'typemoq'; -import * as utils from '../../common/utils'; import * as sinon from 'sinon'; -import * as os from 'os'; -import * as fs from 'fs'; -import * as path from 'path'; -import * as bdc from 'bdc'; import * as vscode from 'vscode'; -import { nb, IConnectionProfile, connection, ConnectionOptionSpecialType, ServerInfo } from 'azdata'; +import { nb } from 'azdata'; import { SessionManager, Session, Kernel } from '@jupyterlab/services'; import 'mocha'; import { JupyterSessionManager, JupyterSession } from '../../jupyter/jupyterSessionManager'; import { Deferred } from '../../common/promise'; import { SessionStub, KernelStub, FutureStub } from '../common'; -import { 
noBDCConnectionError, providerNotValidError } from '../../common/localizedConstants'; import { ExtensionContextHelper } from '../../common/extensionContextHelper'; import { AppContext } from '../../common/appContext'; -import uuid = require('uuid'); -class TestClusterController implements bdc.IClusterController { - getClusterConfig(): Promise<any> { - return Promise.resolve({}); - } - getKnoxUsername(clusterUsername: string): Promise<string> { - return Promise.resolve('knoxUsername'); - } - getEndPoints(promptConnect?: boolean): Promise<bdc.IEndPointsResponse> { - return Promise.resolve(<bdc.IEndPointsResponse>{ - response: undefined, - endPoints: [] - }); - } - username: string; - password: string; -} - -before(async function(): Promise<void> { +before(async function (): Promise<void> { // We have to reset the extension context here since the test runner unloads the files before running the tests // so the static state is lost const api = await vscode.extensions.getExtension('Microsoft.notebook').activate(); @@ -207,181 +183,6 @@ describe('Jupyter Session', function (): void { should(options.name).equal('python'); }); - it('should write configuration to config.json file', async function (): Promise<void> { - let tempDir = os.tmpdir(); - let configPath = path.join(tempDir, '.sparkmagic', 'config.json'); - const expectedResult = { - 'kernel_python_credentials': { - 'url': 'http://localhost:8088' - }, - 'kernel_scala_credentials': { - 'url': 'http://localhost:8088' - }, - 'kernel_r_credentials': { - 'url': 'http://localhost:8088' - }, - 'livy_session_startup_timeout_seconds': 100, - 'logging_config': { - 'version': 1, - 'formatters': { - 'magicsFormatter': { - 'format': '%(asctime)s\t%(levelname)s\t%(message)s', - 'datefmt': '' - } - }, - 'handlers': { - 'magicsHandler': { - 'class': 'hdijupyterutils.filehandler.MagicsFileHandler', - 'formatter': 'magicsFormatter', - 'home_path': '' - } - }, - 'loggers': { - 'magicsLogger': { - 'handlers': ['magicsHandler'], - 'level': 'DEBUG', - 'propagate': 0 - } - } - }, - 'ignore_ssl_errors': true, - }; - expectedResult.logging_config.handlers.magicsHandler.home_path = path.join(tempDir, '.sparkmagic'); - sinon.stub(utils, 'getUserHome').returns(tempDir); - await session.configureKernel(); - let result = await fs.promises.readFile(configPath, 'utf-8'); - should(JSON.parse(result) === expectedResult); - }); - - it('should configure connection correctly for MSSQL and SqlLogin auth type', async function (): Promise<void> { - const isLinux = os.platform() === 'linux'; - if (!isLinux) { - let connectionProfile: IConnectionProfile = { - authenticationType: '', - connectionName: '', - databaseName: '', - id: 'id', - providerName: 'MSSQL', - options: { - authenticationType: connection.AuthenticationType.SqlLogin, - }, - password: '', - savePassword: false, - saveProfile: false, - serverName: '', - userName: '' - }; - let futureMock = TypeMoq.Mock.ofType(FutureStub); - let kernelMock = TypeMoq.Mock.ofType(KernelStub); - kernelMock.setup(k => k.name).returns(() => 'spark'); - kernelMock.setup(m => m.requestExecute(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => futureMock.object); - mockJupyterSession.setup(s => s.kernel).returns(() => kernelMock.object); - let creds = { [ConnectionOptionSpecialType.password]: 'password' }; - sinon.stub(connection, 'getCredentials').returns(Promise.resolve(creds)); - - // Set up connection info to big data cluster - const mockServerInfo: ServerInfo = { - serverMajorVersion: 0, - serverMinorVersion: 0, - serverReleaseVersion: 0, - engineEditionId: 0, - serverVersion: '', - serverLevel: '', - serverEdition: 
'', - isCloud: false, - azureVersion: 0, - osVersion: '', - cpuCount: 0, - physicalMemoryInMb: -1, - options: { - isBigDataCluster: true - } - }; - const mockGatewayEndpoint: bdc.IEndpointModel = { - name: 'gateway', - description: '', - endpoint: '', - protocol: '', - }; - const mockControllerEndpoint: bdc.IEndpointModel = { - name: 'controller', - description: '', - endpoint: '', - protocol: '', - }; - const mockHostAndIp: utils.HostAndIp = { - host: '127.0.0.1', - port: '1337' - }; - const mockClustercontroller = new TestClusterController(); - mockClustercontroller.username = 'admin'; - mockClustercontroller.password = uuid.v4(); - let mockBdcExtension: TypeMoq.IMock<bdc.IExtension> = TypeMoq.Mock.ofType<bdc.IExtension>(); - let mockExtension: TypeMoq.IMock<vscode.Extension<any>> = TypeMoq.Mock.ofType<vscode.Extension<any>>(); - mockBdcExtension.setup(m => m.getClusterController(TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => mockClustercontroller); - mockBdcExtension.setup((m: any) => m.then).returns(() => mockBdcExtension); - mockExtension.setup(m => m.activate()).returns(() => Promise.resolve(mockBdcExtension.object)); - mockExtension.setup((m: any) => m.then).returns(() => mockExtension); - sinon.stub(vscode.extensions, 'getExtension').returns(mockExtension.object); - sinon.stub(connection, 'getServerInfo').returns(Promise.resolve(mockServerInfo)); - sinon.stub(utils, 'getClusterEndpoints').returns([mockGatewayEndpoint, mockControllerEndpoint]); - sinon.stub(utils, 'getHostAndPortFromEndpoint').returns(mockHostAndIp); - await session.configureConnection(connectionProfile); - should(connectionProfile.options['host']).equal(mockHostAndIp.host); - should(connectionProfile.options['knoxport']).equal(mockHostAndIp.port); - } - }); - - it('configure connection should throw error if there is no connection to big data cluster', async function (): Promise<void> { - let connectionProfile: IConnectionProfile = { - authenticationType: '', - connectionName: '', - databaseName: '', - id: 'id', - providerName: 'MSSQL', - options: { - authenticationType: connection.AuthenticationType.SqlLogin, - }, - password: '', - savePassword: false, - saveProfile: false, - serverName: '', - userName: '' - }; - let futureMock = TypeMoq.Mock.ofType(FutureStub); - let kernelMock = TypeMoq.Mock.ofType(KernelStub); - kernelMock.setup(k => k.name).returns(() => 'spark'); - kernelMock.setup(m => m.requestExecute(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => futureMock.object); - mockJupyterSession.setup(s => s.kernel).returns(() => kernelMock.object); - let credentials = { [ConnectionOptionSpecialType.password]: 'password' }; - sinon.stub(connection, 'getCredentials').returns(Promise.resolve(credentials)); - await should(session.configureConnection(connectionProfile)).be.rejectedWith(noBDCConnectionError); - }); - - it('configure connection should throw error if provider is not MSSQL for spark kernel', async function (): Promise<void> { - let connectionProfile: IConnectionProfile = { - authenticationType: '', - connectionName: '', - databaseName: '', - id: 'id', - providerName: 'provider', - options: { - authenticationType: connection.AuthenticationType.SqlLogin, - }, - password: '', - savePassword: false, - saveProfile: false, - serverName: '', - userName: '' - }; - let futureMock = TypeMoq.Mock.ofType(FutureStub); - let kernelMock = TypeMoq.Mock.ofType(KernelStub); - kernelMock.setup(k => k.name).returns(() => 'spark'); - kernelMock.setup(m => m.requestExecute(TypeMoq.It.isAny(), TypeMoq.It.isAny())).returns(() => futureMock.object); - 
mockJupyterSession.setup(s => s.kernel).returns(() => kernelMock.object); - await should(session.configureConnection(connectionProfile)).be.rejectedWith(providerNotValidError); - }); - it('should set environment variables correctly', function (): void { let futureMock = TypeMoq.Mock.ofType(FutureStub); let kernelMock = TypeMoq.Mock.ofType(KernelStub); diff --git a/extensions/notebook/src/test/python/jupyterInstallation.test.ts b/extensions/notebook/src/test/python/jupyterInstallation.test.ts index c50e7606e0..a08bd31c25 100644 --- a/extensions/notebook/src/test/python/jupyterInstallation.test.ts +++ b/extensions/notebook/src/test/python/jupyterInstallation.test.ts @@ -11,8 +11,8 @@ import * as uuid from 'uuid'; import * as fs from 'fs-extra'; import * as request from 'request'; import * as utils from '../../common/utils'; -import { requiredJupyterPkg, JupyterServerInstallation, requiredPowershellPkg, PythonInstallSettings, PythonPkgDetails, requiredSparkPackages } from '../../jupyter/jupyterServerInstallation'; -import { powershellDisplayName, pysparkDisplayName, python3DisplayName, sparkRDisplayName, sparkScalaDisplayName, winPlatform } from '../../common/constants'; +import { requiredJupyterPkg, JupyterServerInstallation, requiredPowershellPkg, PythonInstallSettings, PythonPkgDetails } from '../../jupyter/jupyterServerInstallation'; +import { powershellDisplayName, python3DisplayName, winPlatform } from '../../common/constants'; describe('Jupyter Server Installation', function () { let outputChannelStub: TypeMoq.IMock<vscode.OutputChannel>; @@ -234,17 +234,6 @@ describe('Jupyter Server Installation', function () { should(packages).be.deepEqual([requiredJupyterPkg, requiredPowershellPkg]); }); - it('Get required packages test - Spark kernels', async function () { - let packages = installation.getRequiredPackagesForKernel(pysparkDisplayName); - should(packages).be.deepEqual(requiredSparkPackages, 'Unexpected packages for PySpark kernel.'); - - packages = installation.getRequiredPackagesForKernel(sparkScalaDisplayName); - should(packages).be.deepEqual(requiredSparkPackages, 'Unexpected packages for Spark Scala kernel.'); - - packages = installation.getRequiredPackagesForKernel(sparkRDisplayName); - should(packages).be.deepEqual(requiredSparkPackages, 'Unexpected packages for Spark R kernel.'); - }); - it('Install python test - Run install while Python is already running', async function () { // Should reject overwriting an existing python install if running on Windows and python is currently running. if (process.platform === winPlatform) { diff --git a/extensions/notebook/src/typings/refs.d.ts b/extensions/notebook/src/typings/refs.d.ts index c6a856b58e..420c12b6ad 100644 --- a/extensions/notebook/src/typings/refs.d.ts +++ b/extensions/notebook/src/typings/refs.d.ts @@ -6,5 +6,4 @@ /// /// /// -/// /// diff --git a/extensions/resource-deployment/DEVELOPER_GUIDE.md b/extensions/resource-deployment/DEVELOPER_GUIDE.md index 14f8ceeb96..9ad8d34e1f 100644 --- a/extensions/resource-deployment/DEVELOPER_GUIDE.md +++ b/extensions/resource-deployment/DEVELOPER_GUIDE.md @@ -116,8 +116,6 @@ See [NotebookWizardInfo](https://github.com/microsoft/azuredatastudio/blob/main/ ### WizardInfoBase -`type` - **OPTIONAL** This is an internal type only used for BDC deployment wizards. Any other deployment providers can leave it out. 
- `doneAction` `scriptAction` - **OPTIONAL** @@ -220,7 +218,7 @@ This object defines a set of options for a field, similar to the arrays that can `source` - OPTIONAL If set defines the [Options Source Provider](#options-source-provider) to use for populating the options dynamically. ### Dynamic Options -This enables you to dynamically change what options for a field are displayed to the user based on a previous selection they made. +This enables you to dynamically change what options for a field are displayed to the user based on a previous selection they made. For example, if a user selects "Cookies" over "Cakes" in the first field, the second field will show options ["Chocolate chip", "Snickerdoodle"] instead of ["Red velvet", "Cheesecake", "Black forest"], and vice versa. **NOTE** This is currently only enabled for radio buttons. This works with [CategoryValue](#fieldinfo) values as well. diff --git a/extensions/resource-deployment/src/interfaces.ts b/extensions/resource-deployment/src/interfaces.ts index dbf0342604..609c460e4e 100644 --- a/extensions/resource-deployment/src/interfaces.ts +++ b/extensions/resource-deployment/src/interfaces.ts @@ -77,10 +77,6 @@ export interface DialogDeploymentProvider extends DeploymentProviderBase { dialog: DialogInfo; } -export interface BdcWizardDeploymentProvider extends DeploymentProviderBase { - bdcWizard: BdcWizardInfo; -} - export interface NotebookWizardDeploymentProvider extends DeploymentProviderBase { notebookWizard: NotebookWizardInfo; } @@ -113,10 +109,6 @@ export function instanceOfDialogDeploymentProvider(obj: any): obj is DialogDeplo return obj && 'dialog' in obj; } -export function instanceOfWizardDeploymentProvider(obj: any): obj is BdcWizardDeploymentProvider { - return obj && 'bdcWizard' in obj; -} - export function instanceOfNotebookWizardDeploymentProvider(obj: any): obj is NotebookWizardDeploymentProvider { return obj && 'notebookWizard' in obj; } @@ -151,12 +143,8 @@ export interface DeploymentProviderBase { when: string; } -export type DeploymentProvider = DialogDeploymentProvider | BdcWizardDeploymentProvider | NotebookWizardDeploymentProvider | NotebookDeploymentProvider | WebPageDeploymentProvider | DownloadDeploymentProvider | CommandDeploymentProvider | AzureSQLVMDeploymentProvider | AzureSQLDBDeploymentProvider; +export type DeploymentProvider = DialogDeploymentProvider | NotebookWizardDeploymentProvider | NotebookDeploymentProvider | WebPageDeploymentProvider | DownloadDeploymentProvider | CommandDeploymentProvider | AzureSQLVMDeploymentProvider | AzureSQLDBDeploymentProvider; -export interface BdcWizardInfo { - notebook: string | NotebookPathInfo; - type: BdcDeploymentType; -} /** * An object that configures Script and Done buttons of the wizard. */ @@ -183,7 +171,6 @@ export interface NotebookWizardInfo extends WizardInfoBase { } export interface WizardInfoBase extends FieldInfoBase { - type?: DeploymentType; /** * done button attributes. 
*/ @@ -461,7 +448,6 @@ export enum ToolType { AzCli, KubeCtl, Docker, - Azdata } export const enum ToolStatus { @@ -497,16 +483,6 @@ export interface ITool { promptForEula(): Promise<boolean>; } -export const enum BdcDeploymentType { - NewAKS = 'new-aks', - ExistingAKS = 'existing-aks', - ExistingKubeAdm = 'existing-kubeadm', - ExistingARO = 'existing-aro', - ExistingOpenShift = 'existing-openshift' -} - -export type DeploymentType = BdcDeploymentType; - export interface Command { command: string; sudo?: boolean; diff --git a/extensions/resource-deployment/src/main.ts b/extensions/resource-deployment/src/main.ts index e138b70cff..8a390a5812 100644 --- a/extensions/resource-deployment/src/main.ts +++ b/extensions/resource-deployment/src/main.ts @@ -49,9 +49,7 @@ export async function activate(context: vscode.ExtensionContext): Promise<void> { openDialog('sql-image'); }); - vscode.commands.registerCommand('azdata.resource.sql-bdc.deploy', () => { - openDialog('sql-bdc'); - }); + /** * Command to open the Resource Deployment wizard - with options to filter the values shown * @param defaultResourceTypeName - The default resourceType to be selected diff --git a/extensions/resource-deployment/src/services/azdataService.ts b/extensions/resource-deployment/src/services/azdataService.ts deleted file mode 100644 index ce2c4c882d..0000000000 --- a/extensions/resource-deployment/src/services/azdataService.ts +++ /dev/null @@ -1,75 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ -import * as path from 'path'; -import { IPlatformService } from './platformService'; -import { BigDataClusterDeploymentProfile } from './bigDataClusterDeploymentProfile'; -import { BdcDeploymentType } from '../interfaces'; - -interface BdcConfigListOutput { - result: string[]; -} - -export interface BdcEndpoint { - endpoint: string; - name: 'sql-server-master'; -} - -export interface IAzdataService { - getDeploymentProfiles(deploymentType: BdcDeploymentType): Promise<BigDataClusterDeploymentProfile[]>; -} - -export class AzdataService implements IAzdataService { - constructor(private platformService: IPlatformService) { - } - - public async getDeploymentProfiles(deploymentType: BdcDeploymentType): Promise<BigDataClusterDeploymentProfile[]> { - let profilePrefix: string; - switch (deploymentType) { - case BdcDeploymentType.NewAKS: - case BdcDeploymentType.ExistingAKS: - profilePrefix = 'aks'; - break; - case BdcDeploymentType.ExistingKubeAdm: - profilePrefix = 'kubeadm'; - break; - case BdcDeploymentType.ExistingARO: - profilePrefix = 'aro'; - break; - case BdcDeploymentType.ExistingOpenShift: - profilePrefix = 'openshift'; - break; - default: - throw new Error(`Unknown deployment type: ${deploymentType}`); - } - const profileNames = await this.getDeploymentProfileNames(); - return await Promise.all(profileNames.filter(profile => profile.startsWith(profilePrefix)).map(profile => this.getDeploymentProfileInfo(profile))); - } - - private async getDeploymentProfileNames(): Promise<string[]> { - const env: NodeJS.ProcessEnv = {}; - // azdata requires this environment variable to be set - env['ACCEPT_EULA'] = 'yes'; - const cmd = 'azdata bdc config list -o json'; - const stdout = await this.platformService.runCommand(cmd, { additionalEnvironmentVariables: env }); - const output = <BdcConfigListOutput>JSON.parse(stdout); - return 
output.result; - } - - private async getDeploymentProfileInfo(profileName: string): Promise<BigDataClusterDeploymentProfile> { - const env: NodeJS.ProcessEnv = {}; - // azdata requires this environment variable to be set - env['ACCEPT_EULA'] = 'yes'; - await this.platformService.runCommand(`azdata bdc config init --source ${profileName} --path ${profileName} --force`, { workingDirectory: this.platformService.storagePath(), additionalEnvironmentVariables: env }); - const configObjects = await Promise.all([ - this.getJsonObjectFromFile(path.join(this.platformService.storagePath(), profileName, 'bdc.json')), - this.getJsonObjectFromFile(path.join(this.platformService.storagePath(), profileName, 'control.json')) - ]); - return new BigDataClusterDeploymentProfile(profileName, configObjects[0], configObjects[1]); - } - - private async getJsonObjectFromFile(path: string): Promise<any> { - return JSON.parse(await this.platformService.readTextFile(path)); - } -} diff --git a/extensions/resource-deployment/src/services/bigDataClusterDeploymentProfile.ts b/extensions/resource-deployment/src/services/bigDataClusterDeploymentProfile.ts deleted file mode 100644 index fee5d21d9b..0000000000 --- a/extensions/resource-deployment/src/services/bigDataClusterDeploymentProfile.ts +++ /dev/null @@ -1,349 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ -import { AuthenticationMode } from '../ui/deployClusterWizard/deployClusterWizardModel'; -export const SqlServerMasterResource = 'master'; -export const DataResource = 'data-0'; -export const HdfsResource = 'storage-0'; -export const ComputeResource = 'compute-0'; -export const NameNodeResource = 'nmnode-0'; -export const SparkHeadResource = 'sparkhead'; -export const ZooKeeperResource = 'zookeeper'; -export const SparkResource = 'spark-0'; - -interface ServiceEndpoint { - port: number; - serviceType: ServiceType; - name: EndpointName; - dnsName?: string; -} -type ServiceType = 'NodePort' | 'LoadBalancer'; -type EndpointName = 'Controller' | 'Master' | 'Knox' | 'MasterSecondary' | 'AppServiceProxy' | 'ServiceProxy'; - -export interface ActiveDirectorySettings { - organizationalUnit: string; - domainControllerFQDNs: string; - dnsIPAddresses: string; - domainDNSName: string; - realm?: string; - clusterUsers: string; - clusterAdmins: string; - appReaders?: string; - appOwners?: string; - subdomain?: string; - accountPrefix?: string; -} - -export class BigDataClusterDeploymentProfile { - constructor(private _profileName: string, private _bdcConfig: any, private _controlConfig: any) { - // TODO: add validation logic for these 2 objects - // https://github.com/microsoft/azuredatastudio/issues/7344 - } - - public get profileName(): string { - return this._profileName; - } - - public get clusterName(): string { - return this._bdcConfig.metadata.name; - } - - public set clusterName(value: string) { - this._bdcConfig.metadata.name = value; - } - - public get registry(): string { - return this._controlConfig.spec.docker.registry; - } - - public set registry(value: string) { - this._controlConfig.spec.docker.registry = value; - } - - public get repository(): string { - return this._controlConfig.spec.docker.repository; - } - - public set repository(value: string) { 
this._controlConfig.spec.docker.repository = value; - } - - public get imageTag(): string { - return this._controlConfig.spec.docker.imageTag; - } - - public set imageTag(value: string) { - this._controlConfig.spec.docker.imageTag = value; - } - - public get bdcConfig(): any { - return this._bdcConfig; - } - - public get controlConfig(): any { - return this._controlConfig; - } - - public get sqlServerReplicas(): number { - return this.getReplicas(SqlServerMasterResource); - } - - public set sqlServerReplicas(replicas: number) { - this.setReplicas(SqlServerMasterResource, replicas); - } - - public get hdfsNameNodeReplicas(): number { - return this.getReplicas(NameNodeResource); - } - - public set hdfsNameNodeReplicas(replicas: number) { - this.setReplicas(NameNodeResource, replicas); - } - - public get sparkHeadReplicas(): number { - return this.getReplicas(SparkHeadResource); - } - - public set sparkHeadReplicas(replicas: number) { - this.setReplicas(SparkHeadResource, replicas); - } - - public get dataReplicas(): number { - return this.getReplicas(DataResource); - } - - public set dataReplicas(replicas: number) { - this.setReplicas(DataResource, replicas); - } - - public get hdfsReplicas(): number { - return this.getReplicas(HdfsResource); - } - - public set hdfsReplicas(replicas: number) { - this.setReplicas(HdfsResource, replicas); - } - - public get zooKeeperReplicas(): number { - return this.getReplicas(ZooKeeperResource); - } - - public set zooKeeperReplicas(replicas: number) { - this.setReplicas(ZooKeeperResource, replicas); - } - - public get computeReplicas(): number { - return this.getReplicas(ComputeResource); - } - - public set computeReplicas(replicas: number) { - this.setReplicas(ComputeResource, replicas); - } - - public get sparkReplicas(): number { - return this._bdcConfig.spec.resources[SparkResource] ? 
this.getReplicas(SparkResource) : 0; - } - - public get includeSpark(): boolean { - return this._bdcConfig.spec.resources[HdfsResource].spec.settings.spark.includeSpark; - } - - public set includeSpark(value: boolean) { - this._bdcConfig.spec.resources[HdfsResource].spec.settings.spark.includeSpark = value; - } - - public get controllerDataStorageClass(): string { - return this._controlConfig.spec.storage.data.className; - } - - public set controllerDataStorageClass(value: string) { - this._controlConfig.spec.storage.data.className = value; - } - - public get controllerDataStorageSize(): number { - return this._controlConfig.spec.storage.data.size.replace('Gi', ''); - } - - public set controllerDataStorageSize(value: number) { - this._controlConfig.spec.storage.data.size = `${value}Gi`; - } - - public get controllerLogsStorageClass(): string { - return this._controlConfig.spec.storage.logs.className; - } - - public set controllerLogsStorageClass(value: string) { - this._controlConfig.spec.storage.logs.className = value; - } - - public get controllerLogsStorageSize(): number { - return this._controlConfig.spec.storage.logs.size.replace('Gi', ''); - } - - public set controllerLogsStorageSize(value: number) { - this._controlConfig.spec.storage.logs.size = `${value}Gi`; - } - - public setResourceStorage(resourceName: 'data-0' | 'master' | 'storage-0', dataStorageClass: string, dataStorageSize: number, logsStorageClass: string, logsStorageSize: number) { - this.bdcConfig.spec.resources[resourceName].spec.storage = { - data: { - size: `${dataStorageSize}Gi`, - className: dataStorageClass, - accessMode: 'ReadWriteOnce' - }, - logs: { - size: `${logsStorageSize}Gi`, - className: logsStorageClass, - accessMode: 'ReadWriteOnce' - } - }; - } - - public get controllerPort(): number { - return this.getEndpointPort(this._controlConfig.spec.endpoints, 'Controller', 30080); - } - - public setControllerEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._controlConfig.spec.endpoints, 'Controller', port, dnsName); - } - - public get serviceProxyPort(): number { - return this.getEndpointPort(this._controlConfig.spec.endpoints, 'ServiceProxy', 30080); - } - - public setServiceProxyEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._controlConfig.spec.endpoints, 'ServiceProxy', port, dnsName); - } - - public get appServiceProxyPort(): number { - return this.getEndpointPort(this._bdcConfig.spec.resources.appproxy.spec.endpoints, 'AppServiceProxy', 30777); - } - - public setAppServiceProxyEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._bdcConfig.spec.resources.appproxy.spec.endpoints, 'AppServiceProxy', port, dnsName); - } - - public get sqlServerPort(): number { - return this.getEndpointPort(this._bdcConfig.spec.resources.master.spec.endpoints, 'Master', 31433); - } - - public setSqlServerEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._bdcConfig.spec.resources.master.spec.endpoints, 'Master', port, dnsName); - } - - public get sqlServerReadableSecondaryPort(): number { - return this.getEndpointPort(this._bdcConfig.spec.resources.master.spec.endpoints, 'MasterSecondary', 31436); - } - - public setSqlServerReadableSecondaryEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._bdcConfig.spec.resources.master.spec.endpoints, 'MasterSecondary', port, dnsName); - } - - public get gatewayPort(): number { - return this.getEndpointPort(this._bdcConfig.spec.resources.gateway.spec.endpoints, 'Knox', 30443); - } - - public 
setGatewayEndpoint(port: number, dnsName?: string) { - this.setEndpoint(this._bdcConfig.spec.resources.gateway.spec.endpoints, 'Knox', port, dnsName); - } - - public addSparkResource(replicas: number): void { - this._bdcConfig.spec.resources[SparkResource] = { - metadata: { - kind: 'Pool', - name: 'default' - }, - spec: { - type: 'Spark', - replicas: replicas - } - }; - - this._bdcConfig.spec.services.spark.resources.push(SparkResource); - this._bdcConfig.spec.services.hdfs.resources.push(SparkResource); - } - - public get activeDirectorySupported(): boolean { - // The profiles that highlight the AD authentication feature will have a security section in the control.json for the AD settings. - return 'security' in this._controlConfig; - } - - public setAuthenticationMode(mode: string): void { - // If basic authentication is picked, the activeDirectory security section must be removed - // otherwise azdata will throw validation error - if (mode === AuthenticationMode.Basic && 'security' in this._controlConfig && 'activeDirectory' in this._controlConfig.security) { - delete this._controlConfig.security.activeDirectory; - } - } - - public setActiveDirectorySettings(adSettings: ActiveDirectorySettings): void { - const activeDirectoryObject: any = {}; - activeDirectoryObject.ouDistinguishedName = adSettings.organizationalUnit; - activeDirectoryObject.dnsIpAddresses = this.splitByComma(adSettings.dnsIPAddresses); - activeDirectoryObject.domainControllerFullyQualifiedDns = this.splitByComma(adSettings.domainControllerFQDNs.toLowerCase()); - activeDirectoryObject.domainDnsName = adSettings.domainDNSName; - activeDirectoryObject.subdomain = adSettings.subdomain; - activeDirectoryObject.accountPrefix = adSettings.accountPrefix; - activeDirectoryObject.realm = adSettings.realm ?? adSettings.domainDNSName.toUpperCase(); - activeDirectoryObject.clusterAdmins = this.splitByComma(adSettings.clusterAdmins); - activeDirectoryObject.clusterUsers = this.splitByComma(adSettings.clusterUsers); - if (adSettings.appReaders) { - activeDirectoryObject.appReaders = this.splitByComma(adSettings.appReaders); - } - if (adSettings.appOwners) { - activeDirectoryObject.appOwners = this.splitByComma(adSettings.appOwners); - } - - this._controlConfig.security.activeDirectory = activeDirectoryObject; - } - - public getBdcJson(readable: boolean = true): string { - return this.stringifyJson(this._bdcConfig, readable); - } - - public getControlJson(readable: boolean = true): string { - return this.stringifyJson(this._controlConfig, readable); - } - - private stringifyJson(obj: any, readable: boolean): string { - return JSON.stringify(obj, undefined, readable ? 4 : 0); - } - - private getReplicas(resourceName: string): number { - return this._bdcConfig.spec.resources[resourceName].spec.replicas; - } - - private setReplicas(resourceName: string, replicas: number): void { - this._bdcConfig.spec.resources[resourceName].spec.replicas = replicas; - } - - private getEndpointPort(endpoints: ServiceEndpoint[], name: EndpointName, defaultValue: number): number { - const endpoint = endpoints.find(endpoint => endpoint.name === name); - return endpoint ? 
endpoint.port : defaultValue; - } - - private setEndpoint(endpoints: ServiceEndpoint[], name: EndpointName, port: number, dnsName?: string): void { - const endpoint = endpoints.find(endpoint => endpoint.name === name); - if (endpoint) { - endpoint.port = port; - endpoint.dnsName = dnsName; - } else { - const newEndpoint: ServiceEndpoint = { - name: name, - serviceType: 'NodePort', - port: port - }; - // for newly added endpoint, we cannot have blank value for the dnsName, only set it if it is not empty - if (dnsName) { - newEndpoint.dnsName = dnsName; - } - endpoints.push(newEndpoint); - } - } - - private splitByComma(value: string): string[] { - // split by comma, then remove trailing spaces for each item and finally remove the empty values. - return value.split(',').map(v => v && v.trim()).filter(v => v !== '' && v !== undefined); - } -} diff --git a/extensions/resource-deployment/src/services/resourceTypeService.ts b/extensions/resource-deployment/src/services/resourceTypeService.ts index 3180449d87..cf33b5ea1d 100644 --- a/extensions/resource-deployment/src/services/resourceTypeService.ts +++ b/extensions/resource-deployment/src/services/resourceTypeService.ts @@ -9,8 +9,7 @@ import * as os from 'os'; import * as path from 'path'; import * as vscode from 'vscode'; import * as nls from 'vscode-nls'; -import { DeploymentProvider, instanceOfAzureSQLVMDeploymentProvider, instanceOfAzureSQLDBDeploymentProvider, instanceOfCommandDeploymentProvider, instanceOfDialogDeploymentProvider, instanceOfDownloadDeploymentProvider, instanceOfNotebookBasedDialogInfo, instanceOfNotebookDeploymentProvider, instanceOfNotebookWizardDeploymentProvider, instanceOfWebPageDeploymentProvider, instanceOfWizardDeploymentProvider, NotebookInfo, NotebookPathInfo, ResourceType, ResourceTypeOption, ResourceSubType, AgreementInfo, HelpText, InitialVariableValues } from '../interfaces'; -import { AzdataService } from './azdataService'; +import { DeploymentProvider, instanceOfAzureSQLVMDeploymentProvider, instanceOfAzureSQLDBDeploymentProvider, instanceOfCommandDeploymentProvider, instanceOfDialogDeploymentProvider, instanceOfDownloadDeploymentProvider, instanceOfNotebookBasedDialogInfo, instanceOfNotebookDeploymentProvider, instanceOfNotebookWizardDeploymentProvider, instanceOfWebPageDeploymentProvider, NotebookInfo, NotebookPathInfo, ResourceType, ResourceTypeOption, ResourceSubType, AgreementInfo, HelpText, InitialVariableValues } from '../interfaces'; import { KubeService } from './kubeService'; import { INotebookService } from './notebookService'; import { IPlatformService } from './platformService'; @@ -116,9 +115,6 @@ export class ResourceTypeService implements IResourceTypeService { } else if (instanceOfDialogDeploymentProvider(provider) && instanceOfNotebookBasedDialogInfo(provider.dialog)) { this.updateNotebookPath(provider.dialog, extensionPath); } - else if ('bdcWizard' in provider) { - this.updateNotebookPath(provider.bdcWizard, extensionPath); - } else if ('notebookWizard' in provider) { this.updateNotebookPath(provider.notebookWizard, extensionPath); } @@ -245,8 +241,7 @@ export class ResourceTypeService implements IResourceTypeService { let providerIndex = 1; resourceType.providers.forEach(provider => { const providerPositionInfo = `${positionInfo}, provider index: ${providerIndex} `; - if (!instanceOfWizardDeploymentProvider(provider) - && !instanceOfNotebookWizardDeploymentProvider(provider) + if (!instanceOfNotebookWizardDeploymentProvider(provider) && !instanceOfDialogDeploymentProvider(provider) && 
!instanceOfNotebookDeploymentProvider(provider) && !instanceOfDownloadDeploymentProvider(provider) @@ -328,7 +323,7 @@ export class ResourceTypeService implements IResourceTypeService { } public startDeployment(resourceType: ResourceType, optionValuesFilter?: OptionValuesFilter, initialVariableValues?: InitialVariableValues): void { - const wizard = new ResourceTypeWizard(resourceType, new KubeService(), new AzdataService(this.platformService), this.notebookService, this.toolsService, this.platformService, this, optionValuesFilter, initialVariableValues); + const wizard = new ResourceTypeWizard(resourceType, new KubeService(), this.notebookService, this.toolsService, this.platformService, this, optionValuesFilter, initialVariableValues); wizard.open(); } diff --git a/extensions/resource-deployment/src/services/tools/azdataTool.ts b/extensions/resource-deployment/src/services/tools/azdataTool.ts deleted file mode 100644 index 24d7fc33ff..0000000000 --- a/extensions/resource-deployment/src/services/tools/azdataTool.ts +++ /dev/null @@ -1,182 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ -import { EOL } from 'os'; -import * as path from 'path'; -import { SemVer } from 'semver'; -import * as vscode from 'vscode'; -import * as nls from 'vscode-nls'; -import { AzdataInstallLocationKey, DeploymentConfigurationKey } from '../../constants'; -import { Command, OsDistribution, ToolType } from '../../interfaces'; -import { IPlatformService } from '../platformService'; -import { dependencyType, ToolBase } from './toolBase'; -import { SemVerProxy } from './SemVerProxy'; - -const localize = nls.loadMessageBundle(); -export const AzdataToolName = 'azdata'; -const win32InstallationRoot = `${process.env['ProgramFiles(x86)']}\\Microsoft SDKs\\Azdata\\CLI\\wbin`; -const macInstallationRoot = '/usr/local/bin'; -const debianInstallationRoot = '/usr/local/bin'; - -export class AzdataTool extends ToolBase { - constructor(platformService: IPlatformService) { - super(platformService); - } - - get name(): string { - return AzdataToolName; - } - - get description(): string { - return localize('resourceDeployment.AzdataDescription', "Azure Data command line interface"); - } - - get type(): ToolType { - return ToolType.Azdata; - } - - get displayName(): string { - return localize('resourceDeployment.AzdataDisplayName', "Azure Data CLI"); - } - - get homePage(): string { - return 'https://docs.microsoft.com/sql/big-data-cluster/deploy-install-azdata'; - } - - protected get versionCommand(): Command { - return { - command: 'azdata -v' - }; - } - - protected get discoveryCommand(): Command { - return { - command: this.discoveryCommandString('azdata') - }; - } - - protected getVersionFromOutput(output: string): SemVer | undefined { - let version: SemVer | undefined = undefined; - if (output && output.split(EOL).length > 0) { - version = new SemVerProxy(output.split(EOL)[0].replace(/ /g, '')); - } - return version; - } - protected override async getSearchPaths(): Promise<string[]> { - switch (this.osDistribution) { - case OsDistribution.win32: - return [win32InstallationRoot]; - case OsDistribution.darwin: - return [macInstallationRoot]; - case OsDistribution.debian: - return [debianInstallationRoot]; - default: - const 
azdataCliInstallLocation = await this.getPip3InstallLocation('azdata-cli'); - if (azdataCliInstallLocation) { - return [path.join(azdataCliInstallLocation, '..', 'Scripts'), path.join(azdataCliInstallLocation, '..', '..', '..', 'bin')]; - } else { - return []; - } - } - } - - protected get allInstallationCommands(): Map<OsDistribution, Command[]> { - return new Map([ - [OsDistribution.debian, this.debianInstallationCommands], - [OsDistribution.win32, this.win32InstallationCommands], - [OsDistribution.darwin, this.macOsInstallationCommands], - [OsDistribution.others, []] - ]); - } - - - private get azdataInstallLocation(): string { - return vscode.workspace.getConfiguration(DeploymentConfigurationKey)[AzdataInstallLocationKey] || this.defaultInstallLocationByDistribution.get(this.osDistribution); - } - - private defaultInstallLocationByDistribution: Map<OsDistribution, string> = new Map([ - [OsDistribution.debian, 'https://packages.microsoft.com/config/ubuntu/16.04/mssql-server-2019.list'], - [OsDistribution.win32, 'https://aka.ms/azdata-msi'], - [OsDistribution.darwin, 'microsoft/azdata-cli-release'], - [OsDistribution.others, ''] - ]); - - protected override dependenciesByOsType: Map<OsDistribution, dependencyType[]> = new Map([ - [OsDistribution.debian, []], - [OsDistribution.win32, []], - [OsDistribution.darwin, [dependencyType.Brew]], - [OsDistribution.others, []] - ]); - - private get win32InstallationCommands() { - return [ - { - comment: localize('resourceDeployment.Azdata.DeletingPreviousAzdata.msi', "deleting previously downloaded Azdata.msi if one exists …"), - command: `IF EXIST .\\Azdata.msi DEL /F .\\Azdata.msi` - }, - { - sudo: true, - comment: localize('resourceDeployment.Azdata.DownloadingAndInstallingAzdata', "downloading Azdata.msi and installing azdata-cli …"), - command: `powershell -NoLogo -NonInteractive -NoProfile -Command "& {try {(New-Object System.Net.WebClient).DownloadFile('${this.azdataInstallLocation}', 'Azdata.msi'); Start-Process msiexec.exe -Wait -ArgumentList '/I Azdata.msi /passive /quiet /lvx ADS_AzdataInstall.log'} catch { Write-Error $_.Exception; exit 1 }}"` - }, - { - comment: localize('resourceDeployment.Azdata.DisplayingInstallationLog', "displaying the installation log …"), - command: `type ADS_AzdataInstall.log | findstr /i /v "^MSI"`, - ignoreError: true - } - ]; - } - - private get macOsInstallationCommands() { - return [ - { - comment: localize('resourceDeployment.Azdata.TappingBrewRepository', "tapping into the brew repository for azdata-cli …"), - command: `brew tap ${this.azdataInstallLocation}` - }, - { - comment: localize('resourceDeployment.Azdata.UpdatingBrewRepository', "updating the brew repository for azdata-cli installation …"), - command: 'brew update' - }, - { - comment: localize('resourceDeployment.Azdata.InstallingAzdata', "installing azdata …"), - command: 'brew install azdata-cli' - } - ]; - } - - private get debianInstallationCommands() { - return [ - { - sudo: true, - comment: localize('resourceDeployment.Azdata.AptGetUpdate', "updating repository information …"), - command: 'apt-get update' - }, - { - sudo: true, - comment: localize('resourceDeployment.Azdata.AptGetPackages', "getting packages needed for azdata installation …"), - command: 'apt-get install gnupg ca-certificates curl apt-transport-https lsb-release -y' - }, - { - sudo: true, - comment: localize('resourceDeployment.Azdata.DownloadAndInstallingSigningKey', "downloading and installing the signing key for azdata …"), - command: 'wget -qO- https://packages.microsoft.com/keys/microsoft.asc | apt-key add -' - }, - { - sudo: true, - comment: 
localize('resourceDeployment.Azdata.AddingAzdataRepositoryInformation', "adding the azdata repository information …"), - command: `add-apt-repository "$(wget -qO- ${this.azdataInstallLocation})"` - }, - { - sudo: true, - comment: localize('resourceDeployment.Azdata.AptGetUpdate', "updating repository information …"), - command: 'apt-get update' - }, - { - sudo: true, - comment: localize('resourceDeployment.Azdata.InstallingAzdata', "installing azdata …"), - command: 'apt-get install -y azdata-cli' - } - ]; - } -} diff --git a/extensions/resource-deployment/src/services/toolsService.ts b/extensions/resource-deployment/src/services/toolsService.ts index 25c85dd9fa..f2314bd2b1 100644 --- a/extensions/resource-deployment/src/services/toolsService.ts +++ b/extensions/resource-deployment/src/services/toolsService.ts @@ -7,7 +7,6 @@ import { DockerTool } from './tools/dockerTool'; import { AzCliTool } from './tools/azCliTool'; import { KubeCtlTool } from './tools/kubeCtlTool'; import { IPlatformService } from './platformService'; -import { AzdataTool } from './tools/azdataTool'; export interface IToolsService { getToolByName(toolName: string): ITool | undefined; @@ -23,7 +22,6 @@ export class ToolsService implements IToolsService { [ new DockerTool(this._platformService), new AzCliTool(this._platformService), - new AzdataTool(this._platformService), new KubeCtlTool(this._platformService) ].map<[string, ITool]>((tool: ITool) => [tool.name, tool]) ); diff --git a/extensions/resource-deployment/src/test/services/azdataService.test.ts b/extensions/resource-deployment/src/test/services/azdataService.test.ts deleted file mode 100644 index 2e039fb8a9..0000000000 --- a/extensions/resource-deployment/src/test/services/azdataService.test.ts +++ /dev/null @@ -1,57 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/ - -import 'mocha'; -import * as TypeMoq from 'typemoq'; -import * as should from 'should'; -import { IPlatformService, CommandOptions } from '../../services/platformService'; -import { AzdataService } from '../../services/azdataService'; -import { BdcDeploymentType } from '../../interfaces'; - -describe('azdata service Tests', function (): void { - it('azdata service handles deployment types properly', async () => { - const mockPlatformService = TypeMoq.Mock.ofType<IPlatformService>(); - const azdataService = new AzdataService(mockPlatformService.object); - mockPlatformService.setup((service) => service.runCommand(TypeMoq.It.isAnyString(), TypeMoq.It.isAny())).returns((command: string, options: CommandOptions | undefined) => { - return new Promise<string>((resolve) => { - resolve('{"result":[]}'); - }); - }); - - azdataService.getDeploymentProfiles(BdcDeploymentType.ExistingAKS); - azdataService.getDeploymentProfiles(BdcDeploymentType.ExistingARO); - azdataService.getDeploymentProfiles(BdcDeploymentType.ExistingKubeAdm); - azdataService.getDeploymentProfiles(BdcDeploymentType.ExistingOpenShift); - azdataService.getDeploymentProfiles(BdcDeploymentType.NewAKS); - - await should(azdataService.getDeploymentProfiles(<any>'no-such-type')).rejected(); - mockPlatformService.verify((service) => service.runCommand(TypeMoq.It.isAnyString(), TypeMoq.It.isAny()), TypeMoq.Times.exactly(5)); - }); - - it('azdata service returns correct deployment profiles', async () => { - const mockPlatformService = TypeMoq.Mock.ofType<IPlatformService>(); - const azdataService = new AzdataService(mockPlatformService.object); - mockPlatformService.setup((service => service.storagePath())).returns(() => { - return ''; - }); - mockPlatformService.setup((service => service.readTextFile(TypeMoq.It.isAnyString()))).returns((path: string) => { - return new Promise<string>((resolve) => { - resolve('{}'); - }); - }); - mockPlatformService.setup((service) => service.runCommand(TypeMoq.It.isAnyString(), TypeMoq.It.isAny())).returns((command: string, options: CommandOptions | undefined) => { - if (command === 'azdata bdc config list -o json') { - return Promise.resolve('{"result":["aks-1","profile-2"]}'); - } else if (command.startsWith('azdata bdc config init')) { - return Promise.resolve(''); - } - else { - return Promise.reject(`unexpected command: ${command}`); - } - }); - const profiles = await azdataService.getDeploymentProfiles(BdcDeploymentType.NewAKS); - should(profiles.length).be.exactly(1); - }); -}); diff --git a/extensions/resource-deployment/src/test/services/resourceTypeService.test.ts b/extensions/resource-deployment/src/test/services/resourceTypeService.test.ts index 04a98c924d..b3fc99f52e 100644 --- a/extensions/resource-deployment/src/test/services/resourceTypeService.test.ts +++ b/extensions/resource-deployment/src/test/services/resourceTypeService.test.ts @@ -24,13 +24,13 @@ describe('Resource Type Service Tests', function (): void { // index 0: platform name, index 1: expected resource types const platforms: { platform: string; resourceTypes: string[] }[] = [ { - platform: 'win32', resourceTypes: ['sql-image', 'sql-bdc', 'sql-windows-setup'] + platform: 'win32', resourceTypes: ['sql-image', 'sql-windows-setup'] }, { - platform: 'darwin', resourceTypes: ['sql-image', 'sql-bdc'] + platform: 'darwin', resourceTypes: ['sql-image'] }, { - platform: 'linux', resourceTypes: ['sql-image', 'sql-bdc'] + platform: 'linux', resourceTypes: ['sql-image'] } ];
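
For reference, the removed AzdataService tests above rely on a common TypeMoq pattern: stub IPlatformService.runCommand to return canned JSON, then assert on call counts. A minimal, self-contained sketch of that pattern (MiniPlatformService is a pared-down hypothetical stand-in, not the real IPlatformService interface):

    import * as TypeMoq from 'typemoq';

    // Pared-down stand-in for the mocked service; only the one method the sketch needs.
    interface MiniPlatformService {
      runCommand(command: string, options?: object): Promise<string>;
    }

    async function demoMock(): Promise<void> {
      const mock = TypeMoq.Mock.ofType<MiniPlatformService>();
      // Any command returns canned JSON, mirroring how the deleted tests fake azdata output.
      mock.setup(s => s.runCommand(TypeMoq.It.isAnyString(), TypeMoq.It.isAny()))
        .returns(() => Promise.resolve('{"result":[]}'));
      await mock.object.runCommand('azdata bdc config list -o json');
      // Verify the stub was hit exactly once.
      mock.verify(s => s.runCommand(TypeMoq.It.isAnyString(), TypeMoq.It.isAny()), TypeMoq.Times.once());
    }
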
platforms.forEach(platformInfo => { diff --git a/extensions/resource-deployment/src/test/services/toolsService.test.ts b/extensions/resource-deployment/src/test/services/toolsService.test.ts index 8d745e808d..72617f0e87 100644 --- a/extensions/resource-deployment/src/test/services/toolsService.test.ts +++ b/extensions/resource-deployment/src/test/services/toolsService.test.ts @@ -9,14 +9,12 @@ import * as TypeMoq from 'typemoq'; import { ToolsService } from '../../services/toolsService'; import { ITool, ToolType } from '../../interfaces'; import { IPlatformService } from '../../services/platformService'; -import { AzdataToolName } from '../../services/tools/azdataTool'; const tools: { name: string; type: ToolType }[] = [ { name: 'azure-cli', type: ToolType.AzCli }, { name: 'docker', type: ToolType.Docker }, - { name: 'kubectl', type: ToolType.KubeCtl }, - { name: AzdataToolName, type: ToolType.Azdata } + { name: 'kubectl', type: ToolType.KubeCtl } ]; const mockPlatformService = TypeMoq.Mock.ofType(); const toolsService = new ToolsService(mockPlatformService.object); diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/constants.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/constants.ts deleted file mode 100644 index 790506b546..0000000000 --- a/extensions/resource-deployment/src/ui/deployClusterWizard/constants.ts +++ /dev/null @@ -1,73 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -export const DeploymentProfile_VariableName = 'AZDATA_NB_VAR_BDC_DEPLOYMENT_PROFILE'; -export const ClusterName_VariableName = 'AZDATA_NB_VAR_BDC_CLUSTER_NAME'; -export const AdminUserName_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_USERNAME'; -export const AdminPassword_VariableName = 'AZDATA_NB_VAR_BDC_ADMIN_PASSWORD'; -export const AuthenticationMode_VariableName = 'AZDATA_NB_VAR_BDC_AUTHENTICATION_MODE'; -export const OrganizationalUnitDistinguishedName_VariableName = 'AZDATA_NB_VAR_BDC_AD_OUDN'; -export const ClusterAdmins_VariableName = 'AZDATA_NB_VAR_BDC_AD_CLUSTER_ADMINS'; -export const ClusterUsers_VariableName = 'AZDATA_NB_VAR_BDC_AD_CLUSTER_USERS'; -export const DomainDNSIPAddresses_VariableName = 'AZDATA_NB_VAR_BDC_AD_UPSTREAM_IPADDRESSES'; -export const DomainControllerFQDNs_VariableName = 'AZDATA_NB_VAR_BDC_AD_DC_FQDNs'; -export const DomainDNSName_VariableName = 'AZDATA_NB_VAR_BDC_AD_DOMAIN_DNS_NAME'; -export const Realm_VariableName = 'AZDATA_NB_VAR_BDC_AD_REALM'; -export const DomainServiceAccountUserName_VariableName = 'AZDATA_NB_VAR_BDC_AD_DOMAIN_SVC_USERNAME'; -export const DomainServiceAccountPassword_VariableName = 'AZDATA_NB_VAR_BDC_AD_DOMAIN_SVC_PASSWORD'; -export const AppOwners_VariableName = 'AZDATA_NB_VAR_BDC_AD_APP_OWNERS'; -export const AppReaders_VariableName = 'AZDATA_NB_VAR_AD_BDC_AD_APP_READERS'; -export const Subdomain_VariableName = 'AZDATA_NB_VAR_BDC_AD_SUBDOMAIN'; -export const AccountPrefix_VariableName = 'AZDATA_NB_VAR_BDC_AD_ACCOUNTPREFIX'; -export const SubscriptionId_VariableName = 'AZDATA_NB_VAR_BDC_AZURE_SUBSCRIPTION'; -export const ResourceGroup_VariableName = 'AZDATA_NB_VAR_BDC_RESOURCEGROUP_NAME'; -export const Location_VariableName = 'AZDATA_NB_VAR_BDC_AZURE_REGION'; -export const AksName_VariableName = 
'AZDATA_NB_VAR_BDC_AKS_NAME'; -export const VMSize_VariableName = 'AZDATA_NB_VAR_BDC_AZURE_VM_SIZE'; -export const VMCount_VariableName = 'AZDATA_NB_VAR_BDC_VM_COUNT'; -export const KubeConfigPath_VariableName = 'AZDATA_NB_VAR_BDC_KUBECONFIG_PATH'; -export const ClusterContext_VariableName = 'AZDATA_NB_VAR_BDC_CLUSTER_CONTEXT'; -export const SQLServerScale_VariableName = 'AZDATA_NB_VAR_BDC_SQLSERVER_SCALE'; -export const HDFSPoolScale_VariableName = 'AZDATA_NB_VAR_BDC_HDFSPOOL_SCALE'; -export const HDFSNameNodeScale_VariableName = 'AZDATA_NB_VAR_BDC_NAMENODE_SCALE'; -export const ZooKeeperScale_VariableName = 'AZDATA_NB_VAR_BDC_ZOOKEEPER_SCALE'; -export const SparkHeadScale_VariableName = 'AZDATA_NB_VAR_BDC_SPARKHEAD_SCALE'; -export const IncludeSpark_VariableName = 'AZDATA_NB_VAR_BDC_INCLUDESPARK'; -export const ComputePoolScale_VariableName = 'AZDATA_NB_VAR_BDC_COMPUTEPOOL_SCALE'; -export const DataPoolScale_VariableName = 'AZDATA_NB_VAR_BDC_DATAPOOL_SCALE'; -export const SparkPoolScale_VariableName = 'AZDATA_NB_VAR_BDC_SPARKPOOL_SCALE'; -export const ControllerDataStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_DATA_STORAGE_CLASS'; -export const ControllerDataStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_DATA_STORAGE_SIZE'; -export const ControllerLogsStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_LOGS_STORAGE_CLASS'; -export const ControllerLogsStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_LOGS_STORAGE_SIZE'; -export const DataPoolDataStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_DATA_DATA_STORAGE_CLASS'; -export const DataPoolDataStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_DATA_DATA_STORAGE_SIZE'; -export const DataPoolLogsStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_DATA_LOGS_STORAGE_CLASS'; -export const DataPoolLogsStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_DATA_LOGS_STORAGE_SIZE'; -export const HDFSDataStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_HDFS_DATA_STORAGE_CLASS'; -export const HDFSDataStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_HDFS_DATA_STORAGE_SIZE'; -export const HDFSLogsStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_HDFS_LOGS_STORAGE_CLASS'; -export const HDFSLogsStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_HDFS_LOGS_STORAGE_SIZE'; -export const SQLServerDataStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_SQL_DATA_STORAGE_CLASS'; -export const SQLServerDataStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_SQL_DATA_STORAGE_SIZE'; -export const SQLServerLogsStorageClassName_VariableName = 'AZDATA_NB_VAR_BDC_SQL_LOGS_STORAGE_CLASS'; -export const SQLServerLogsStorageSize_VariableName = 'AZDATA_NB_VAR_BDC_SQL_LOGS_STORAGE_SIZE'; -export const ControllerDNSName_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_DNS'; -export const ControllerPort_VariableName = 'AZDATA_NB_VAR_BDC_CONTROLLER_PORT'; -export const SQLServerDNSName_VariableName = 'AZDATA_NB_VAR_BDC_SQL_DNS'; -export const SQLServerPort_VariableName = 'AZDATA_NB_VAR_BDC_SQL_PORT'; -export const GatewayDNSName_VariableName = 'AZDATA_NB_VAR_BDC_GATEWAY_DNS'; -export const GateWayPort_VariableName = 'AZDATA_NB_VAR_BDC_GATEWAY_PORT'; -export const ReadableSecondaryDNSName_VariableName = 'AZDATA_NB_VAR_BDC_READABLE_SECONDARY_DNS'; -export const ReadableSecondaryPort_VariableName = 'AZDATA_NB_VAR_BDC_READABLE_SECONDARY_PORT'; -export const ServiceProxyDNSName_VariableName = 'AZDATA_NB_VAR_BDC_SERVICEPROXY_DNS'; -export const ServiceProxyPort_VariableName = 'AZDATA_NB_VAR_BDC_SERVICEPROXY_PORT'; -export const AppServiceProxyDNSName_VariableName = 
'AZDATA_NB_VAR_BDC_APPSERVICEPROXY_DNS'; -export const AppServiceProxyPort_VariableName = 'AZDATA_NB_VAR_BDC_APPSERVICEPROXY_PORT'; -export const DockerRepository_VariableName = 'AZDATA_NB_VAR_BDC_REPOSITORY'; -export const DockerRegistry_VariableName = 'AZDATA_NB_VAR_BDC_REGISTRY'; -export const DockerImageTag_VariableName = 'AZDATA_NB_VAR_BDC_DOCKER_IMAGE_TAG'; -export const DockerUsername_VariableName = 'AZDATA_NB_VAR_BDC_DOCKER_USERNAME'; -export const DockerPassword_VariableName = 'AZDATA_NB_VAR_BDC_DOCKER_PASSWORD'; diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/deployClusterWizardModel.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/deployClusterWizardModel.ts deleted file mode 100644 index b30f09bfd9..0000000000 --- a/extensions/resource-deployment/src/ui/deployClusterWizard/deployClusterWizardModel.ts +++ /dev/null @@ -1,356 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import { delimiter, join } from 'path'; -import * as fs from 'fs'; -import * as os from 'os'; -import { BdcDeploymentType, BdcWizardDeploymentProvider, ITool } from '../../interfaces'; -import { BigDataClusterDeploymentProfile, DataResource, HdfsResource, SqlServerMasterResource } from '../../services/bigDataClusterDeploymentProfile'; -import { KubeCtlToolName } from '../../services/tools/kubeCtlTool'; -import { getErrorMessage, getRuntimeBinaryPathEnvironmentVariableName, setEnvironmentVariablesForInstallPaths } from '../../common/utils'; -import { ToolsInstallPath } from '../../constants'; -import * as VariableNames from './constants'; -import { ResourceTypeWizard } from '../resourceTypeWizard'; -import * as nls from 'vscode-nls'; -import { InputComponents } from '../modelViewUtils'; -import { INotebookService } from '../../services/notebookService'; -import { IAzdataService } from '../../services/azdataService'; -import { IKubeService } from '../../services/kubeService'; -import { DeploymentProfilePage } from './pages/deploymentProfilePage'; -import { AzureSettingsPage } from './pages/azureSettingsPage'; -import { ClusterSettingsPage } from './pages/clusterSettingsPage'; -import { ServiceSettingsPage } from './pages/serviceSettingsPage'; -import { SummaryPage } from './pages/summaryPage'; -import { TargetClusterContextPage } from './pages/targetClusterPage'; -import { IToolsService } from '../../services/toolsService'; -import { ResourceTypeModel } from '../resourceTypeModel'; -import { ResourceTypePage } from '../resourceTypePage'; -const localize = nls.loadMessageBundle(); - -export class DeployClusterWizardModel extends ResourceTypeModel { - private _inputComponents: InputComponents = {}; - private _kubeService: IKubeService; - private _azdataService: IAzdataService; - private _notebookService: INotebookService; - private toolsService: IToolsService; - - private _saveConfigButton: azdata.window.Button; - - public get kubeService(): IKubeService { - return this._kubeService; - } - - public get azdataService(): IAzdataService { - return this._azdataService; - } - - public get notebookService(): INotebookService { - return this._notebookService; - } - - public get inputComponents(): 
InputComponents { - return this._inputComponents; - } - - public showCustomButtons(): void { - this._saveConfigButton.hidden = false; - } - - public hideCustomButtons(): void { - this._saveConfigButton.hidden = true; - } - - - public get deploymentType(): BdcDeploymentType { - return this.bdcProvider.bdcWizard.type; - } - - initialize(): void { - this.wizard.setPages(this.getPages()); - this.wizard.wizardObject.generateScriptButton.hidden = true; - this.wizard.wizardObject.doneButton.label = localize('deployCluster.ScriptToNotebook', "Script to Notebook"); - } - - async onOk(): Promise<void> { - await this.scriptToNotebook(); - } - - constructor(public bdcProvider: BdcWizardDeploymentProvider, wizard: ResourceTypeWizard) { - super(bdcProvider, wizard); - this._kubeService = this.wizard._kubeService; - this._azdataService = this.wizard.azdataService; - this._notebookService = this.wizard.notebookService; - this.toolsService = this.wizard.toolsService; - this.wizard.wizardObject.title = this.getTitle(this.deploymentType); - this._saveConfigButton = azdata.window.createButton(localize('deployCluster.SaveConfigFiles', "Save config files"), 'left'); - this._saveConfigButton.hidden = true; - this.wizard.addButton(this._saveConfigButton); - this.wizard.registerDisposable(this._saveConfigButton.onClick(() => this.saveConfigFiles())); - } - public adAuthSupported: boolean = false; - - public get authenticationMode(): string | undefined { - return this.getStringValue(VariableNames.AuthenticationMode_VariableName); - } - - public set authenticationMode(value: string | undefined) { - this.setPropertyValue(VariableNames.AuthenticationMode_VariableName, value); - } - - public getStorageSettingValue(propertyName: string, defaultValuePropertyName: string): string | undefined { - const value = this.getStringValue(propertyName); - return (value === undefined || value === '') ?
this.getStringValue(defaultValuePropertyName) : value; - } - - private setStorageSettingValue(propertyName: string, defaultValuePropertyName: string): void { - const value = this.getStringValue(propertyName); - if (value === undefined || value === '') { - this.setPropertyValue(propertyName, this.getStringValue(defaultValuePropertyName)); - } - } - - private setStorageSettingValues(): void { - this.setStorageSettingValue(VariableNames.DataPoolDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.DataPoolDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName); - this.setStorageSettingValue(VariableNames.DataPoolLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.DataPoolLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName); - - this.setStorageSettingValue(VariableNames.HDFSDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.HDFSDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName); - this.setStorageSettingValue(VariableNames.HDFSLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.HDFSLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName); - - this.setStorageSettingValue(VariableNames.SQLServerDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.SQLServerDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName); - this.setStorageSettingValue(VariableNames.SQLServerLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName); - this.setStorageSettingValue(VariableNames.SQLServerLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName); - } - - public override setEnvironmentVariables(): void { - this.setStorageSettingValues(); - } - - public selectedProfile: BigDataClusterDeploymentProfile | undefined; - - public createTargetProfile(): BigDataClusterDeploymentProfile { - // create a copy of the source files to avoid changing the source profile values - const sourceBdcJson = Object.assign({}, this.selectedProfile!.bdcConfig); - const sourceControlJson = Object.assign({}, this.selectedProfile!.controlConfig); - const targetDeploymentProfile = new BigDataClusterDeploymentProfile('', sourceBdcJson, sourceControlJson); - // docker settings - targetDeploymentProfile.controlConfig.spec.docker = { - registry: this.getStringValue(VariableNames.DockerRegistry_VariableName), - repository: this.getStringValue(VariableNames.DockerRepository_VariableName), - imageTag: this.getStringValue(VariableNames.DockerImageTag_VariableName), - imagePullPolicy: 'Always' - }; - // cluster name - targetDeploymentProfile.clusterName = this.getStringValue(VariableNames.ClusterName_VariableName)!; - // storage settings - targetDeploymentProfile.controllerDataStorageClass = this.getStringValue(VariableNames.ControllerDataStorageClassName_VariableName)!; - targetDeploymentProfile.controllerDataStorageSize = this.getIntegerValue(VariableNames.ControllerDataStorageSize_VariableName)!; - targetDeploymentProfile.controllerLogsStorageClass = 
this.getStringValue(VariableNames.ControllerLogsStorageClassName_VariableName)!; - targetDeploymentProfile.controllerLogsStorageSize = this.getIntegerValue(VariableNames.ControllerLogsStorageSize_VariableName)!; - targetDeploymentProfile.setResourceStorage(DataResource, - this.getStorageSettingValue(VariableNames.DataPoolDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.DataPoolDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName)!), - this.getStorageSettingValue(VariableNames.DataPoolLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.DataPoolLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)!) - ); - targetDeploymentProfile.setResourceStorage(SqlServerMasterResource, - this.getStorageSettingValue(VariableNames.SQLServerDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.SQLServerDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName)!), - this.getStorageSettingValue(VariableNames.SQLServerLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.SQLServerLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)!) - ); - targetDeploymentProfile.setResourceStorage(HdfsResource, - this.getStorageSettingValue(VariableNames.HDFSDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.HDFSDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName)!), - this.getStorageSettingValue(VariableNames.HDFSLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName)!, - Number.parseInt(this.getStorageSettingValue(VariableNames.HDFSLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)!) 
- ); - - // scale settings - targetDeploymentProfile.dataReplicas = this.getIntegerValue(VariableNames.DataPoolScale_VariableName); - targetDeploymentProfile.computeReplicas = this.getIntegerValue(VariableNames.ComputePoolScale_VariableName); - targetDeploymentProfile.hdfsReplicas = this.getIntegerValue(VariableNames.HDFSPoolScale_VariableName); - targetDeploymentProfile.sqlServerReplicas = this.getIntegerValue(VariableNames.SQLServerScale_VariableName); - targetDeploymentProfile.hdfsNameNodeReplicas = this.getIntegerValue(VariableNames.HDFSNameNodeScale_VariableName); - targetDeploymentProfile.sparkHeadReplicas = this.getIntegerValue(VariableNames.SparkHeadScale_VariableName); - targetDeploymentProfile.zooKeeperReplicas = this.getIntegerValue(VariableNames.ZooKeeperScale_VariableName); - const sparkScale = this.getIntegerValue(VariableNames.SparkPoolScale_VariableName); - if (sparkScale > 0) { - targetDeploymentProfile.addSparkResource(sparkScale); - } - - targetDeploymentProfile.includeSpark = this.getBooleanValue(VariableNames.IncludeSpark_VariableName); - - // endpoint settings - targetDeploymentProfile.setGatewayEndpoint(this.getIntegerValue(VariableNames.GateWayPort_VariableName), this.getStringValue(VariableNames.GatewayDNSName_VariableName)); - targetDeploymentProfile.setSqlServerEndpoint(this.getIntegerValue(VariableNames.SQLServerPort_VariableName), this.getStringValue(VariableNames.SQLServerDNSName_VariableName)); - targetDeploymentProfile.setControllerEndpoint(this.getIntegerValue(VariableNames.ControllerPort_VariableName), this.getStringValue(VariableNames.ControllerDNSName_VariableName)); - targetDeploymentProfile.setSqlServerReadableSecondaryEndpoint(this.getIntegerValue(VariableNames.ReadableSecondaryPort_VariableName), this.getStringValue(VariableNames.ReadableSecondaryDNSName_VariableName)); - targetDeploymentProfile.setServiceProxyEndpoint(this.getIntegerValue(VariableNames.ServiceProxyPort_VariableName), this.getStringValue(VariableNames.ServiceProxyDNSName_VariableName)); - targetDeploymentProfile.setAppServiceProxyEndpoint(this.getIntegerValue(VariableNames.AppServiceProxyPort_VariableName), this.getStringValue(VariableNames.AppServiceProxyDNSName_VariableName)); - - targetDeploymentProfile.setAuthenticationMode(this.authenticationMode!); - if (this.authenticationMode === AuthenticationMode.ActiveDirectory) { - targetDeploymentProfile.setActiveDirectorySettings({ - organizationalUnit: this.getStringValue(VariableNames.OrganizationalUnitDistinguishedName_VariableName)!, - domainControllerFQDNs: this.getStringValue(VariableNames.DomainControllerFQDNs_VariableName)!, - domainDNSName: this.getStringValue(VariableNames.DomainDNSName_VariableName)!, - realm: this.getStringValue(VariableNames.Realm_VariableName), - dnsIPAddresses: this.getStringValue(VariableNames.DomainDNSIPAddresses_VariableName)!, - clusterAdmins: this.getStringValue(VariableNames.ClusterAdmins_VariableName)!, - clusterUsers: this.getStringValue(VariableNames.ClusterUsers_VariableName)!, - appOwners: this.getStringValue(VariableNames.AppOwners_VariableName), - appReaders: this.getStringValue(VariableNames.AppReaders_VariableName), - subdomain: this.getStringValue(VariableNames.Subdomain_VariableName), - accountPrefix: this.getStringValue(VariableNames.AccountPrefix_VariableName) - }); - } - return targetDeploymentProfile; - } - - public override getCodeCellContentForNotebook(tools: ITool[]): string[] { - const profile = this.createTargetProfile(); - const statements: string[] = []; - if 
(this.deploymentType === BdcDeploymentType.NewAKS) { - statements.push(`azure_subscription_id = '${this.getStringValue(VariableNames.SubscriptionId_VariableName, '')}'`); - statements.push(`azure_region = '${this.getStringValue(VariableNames.Location_VariableName)}'`); - statements.push(`azure_resource_group = '${this.getStringValue(VariableNames.ResourceGroup_VariableName)}'`); - statements.push(`azure_vm_size = '${this.getStringValue(VariableNames.VMSize_VariableName)}'`); - statements.push(`azure_vm_count = '${this.getStringValue(VariableNames.VMCount_VariableName)}'`); - statements.push(`aks_cluster_name = '${this.getStringValue(VariableNames.AksName_VariableName)}'`); - } else if (this.deploymentType === BdcDeploymentType.ExistingAKS - || this.deploymentType === BdcDeploymentType.ExistingKubeAdm - || this.deploymentType === BdcDeploymentType.ExistingARO - || this.deploymentType === BdcDeploymentType.ExistingOpenShift) { - statements.push(`mssql_kube_config_path = '${this.escapeForNotebookCodeCell(this.getStringValue(VariableNames.KubeConfigPath_VariableName)!)}'`); - statements.push(`mssql_cluster_context = '${this.getStringValue(VariableNames.ClusterContext_VariableName)}'`); - statements.push('os.environ["KUBECONFIG"] = mssql_kube_config_path'); - } - if (this.authenticationMode === AuthenticationMode.ActiveDirectory) { - statements.push(`mssql_domain_service_account_username = '${this.escapeForNotebookCodeCell(this.getStringValue(VariableNames.DomainServiceAccountUserName_VariableName)!)}'`); - } - statements.push(`mssql_cluster_name = '${this.getStringValue(VariableNames.ClusterName_VariableName)}'`); - statements.push(`mssql_username = '${this.getStringValue(VariableNames.AdminUserName_VariableName)}'`); - statements.push(`mssql_auth_mode = '${this.authenticationMode}'`); - statements.push(`bdc_json = '${profile.getBdcJson(false)}'`); - statements.push(`control_json = '${profile.getControlJson(false)}'`); - if (this.getStringValue(VariableNames.DockerUsername_VariableName) && this.getStringValue(VariableNames.DockerPassword_VariableName)) { - statements.push(`os.environ["DOCKER_USERNAME"] = '${this.getStringValue(VariableNames.DockerUsername_VariableName)}'`); - statements.push(`os.environ["DOCKER_PASSWORD"] = os.environ["${VariableNames.DockerPassword_VariableName}"]`); - } - const kubeCtlEnvVarName: string = getRuntimeBinaryPathEnvironmentVariableName(KubeCtlToolName); - const env: NodeJS.ProcessEnv = {}; - setEnvironmentVariablesForInstallPaths(tools, env); - statements.push(`os.environ["${kubeCtlEnvVarName}"] = "${this.escapeForNotebookCodeCell(env[kubeCtlEnvVarName]!)}"`); - statements.push(`os.environ["PATH"] = os.environ["PATH"] + "${delimiter}" + "${this.escapeForNotebookCodeCell(env[ToolsInstallPath]!)}"`); - statements.push(`print('Variables have been set successfully.')`); - return statements.map(line => line + os.EOL); - }
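
The getCodeCellContentForNotebook method above turns wizard model values into Python assignment statements that are later inserted into the deployment notebook. A simplified sketch of that codegen idea (toPythonAssignments is a hypothetical helper, not part of the extension API):

    import { EOL } from 'os';

    // Turn a name -> value map into Python assignment lines, one per variable.
    function toPythonAssignments(values: Map<string, string | number>): string[] {
      const statements: string[] = [];
      for (const [name, value] of values) {
        // Quote strings for Python and escape backslashes/quotes; numbers pass through.
        const literal = typeof value === 'string'
          ? `'${value.replace(/\\/g, '\\\\').replace(/'/g, "\\'")}'`
          : String(value);
        statements.push(`${name} = ${literal}`);
      }
      statements.push(`print('Variables have been set successfully.')`);
      return statements.map(line => line + EOL);
    }

    // Example: yields "mssql_cluster_name = 'mssql-cluster'" and "mssql_sql_port = 31433".
    const cell = toPythonAssignments(new Map<string, string | number>([
      ['mssql_cluster_name', 'mssql-cluster'],
      ['mssql_sql_port', 31433]
    ]));
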
- - private async saveConfigFiles(): Promise<void> { - const options: vscode.OpenDialogOptions = { - defaultUri: vscode.Uri.file(os.homedir()), - canSelectFiles: false, - canSelectFolders: true, - canSelectMany: false, - openLabel: localize('deployCluster.SelectConfigFileFolder', "Save config files") - }; - const pathArray = await vscode.window.showOpenDialog(options); - if (pathArray && pathArray[0]) { - const targetFolder = pathArray[0].fsPath; - try { - const profile = this.createTargetProfile(); - await fs.promises.writeFile(join(targetFolder, 'bdc.json'), profile.getBdcJson()); - await fs.promises.writeFile(join(targetFolder, 'control.json'), profile.getControlJson()); - this.wizard.wizardObject.message = { - text: localize('deployCluster.SaveConfigFileSucceeded', "Config files saved to {0}", targetFolder), - level: azdata.window.MessageLevel.Information - }; - } - catch (error) { - this.wizard.wizardObject.message = { - text: error.message, - level: azdata.window.MessageLevel.Error - }; - } - } - } - - private getPages(): ResourceTypePage[] { - const pages: ResourceTypePage[] = []; - switch (this.deploymentType) { - case BdcDeploymentType.NewAKS: - pages.push( - new DeploymentProfilePage(this), - new AzureSettingsPage(this), - new ClusterSettingsPage(this), - new ServiceSettingsPage(this), - new SummaryPage(this)); - break; - case BdcDeploymentType.ExistingAKS: - case BdcDeploymentType.ExistingKubeAdm: - case BdcDeploymentType.ExistingARO: - case BdcDeploymentType.ExistingOpenShift: - pages.push( - new DeploymentProfilePage(this), - new TargetClusterContextPage(this), - new ClusterSettingsPage(this), - new ServiceSettingsPage(this), - new SummaryPage(this)); - break; - default: - throw new Error(`Unknown deployment type: ${this.deploymentType}`); - } - return pages; - } - - private async scriptToNotebook(): Promise<void> { - this.setNotebookEnvironmentVariables(process.env); - const variableValueStatements = this.getCodeCellContentForNotebook(this.toolsService.toolsForCurrentProvider); - const insertionPosition = 5; // Cell number 5 is the position where the python variable setting statements need to be inserted in this.wizardInfo.notebook. - try { - await this.notebookService.openNotebookWithEdits(this.bdcProvider.bdcWizard.notebook, variableValueStatements, insertionPosition); - } catch (error) { - vscode.window.showErrorMessage(getErrorMessage(error)); - } - } - - - private setNotebookEnvironmentVariables(env: NodeJS.ProcessEnv): void { - env[VariableNames.AdminPassword_VariableName] = this.getStringValue(VariableNames.AdminPassword_VariableName); - env[VariableNames.DockerPassword_VariableName] = this.getStringValue(VariableNames.DockerPassword_VariableName); - if (this.authenticationMode === AuthenticationMode.ActiveDirectory) { - env[VariableNames.DomainServiceAccountPassword_VariableName] = this.getStringValue(VariableNames.DomainServiceAccountPassword_VariableName); - } - } - - private getTitle(type: BdcDeploymentType): string { - switch (type) { - case BdcDeploymentType.NewAKS: - return localize('deployCluster.NewAKSWizardTitle', "Deploy SQL Server 2019 Big Data Cluster on a new AKS cluster"); - case BdcDeploymentType.ExistingAKS: - return localize('deployCluster.ExistingAKSWizardTitle', "Deploy SQL Server 2019 Big Data Cluster on an existing AKS cluster"); - case BdcDeploymentType.ExistingKubeAdm: - return localize('deployCluster.ExistingKubeAdm', "Deploy SQL Server 2019 Big Data Cluster on an existing kubeadm cluster"); - case BdcDeploymentType.ExistingARO: - return localize('deployCluster.ExistingARO', "Deploy SQL Server 2019 Big Data Cluster on an existing Azure Red Hat OpenShift cluster"); - case BdcDeploymentType.ExistingOpenShift: - return localize('deployCluster.ExistingOpenShift', "Deploy SQL Server 2019 Big Data Cluster on an existing OpenShift cluster"); - - default: - throw new Error(`Unknown deployment type: ${type}`); - } - } -} - -export enum AuthenticationMode { - ActiveDirectory = 'ad', - Basic = 'basic' -} diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/azureSettingsPage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/azureSettingsPage.ts deleted file mode
100644 index bdc66a9cdc..0000000000 --- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/azureSettingsPage.ts +++ /dev/null @@ -1,186 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import * as vscode from 'vscode'; -import * as nls from 'vscode-nls'; -import { FieldType, LabelPosition, SectionInfo } from '../../../interfaces'; -import { createSection, getDropdownComponent, InputComponent, InputComponentInfo, InputComponents, setModelValues, Validator } from '../../modelViewUtils'; -import { AksName_VariableName, Location_VariableName, ResourceGroup_VariableName, SubscriptionId_VariableName, VMCount_VariableName, VMSize_VariableName } from '../constants'; -import { AzureRegion } from 'azurecore'; -import { DeployClusterWizardModel } from '../deployClusterWizardModel'; -import { ResourceTypePage } from '../../resourceTypePage'; -const localize = nls.loadMessageBundle(); -const MissingRequiredInformationErrorMessage = localize('deployCluster.MissingRequiredInfoError', "Please fill out the required fields marked with red asterisks."); - -export class AzureSettingsPage extends ResourceTypePage { - private inputComponents: InputComponents = {}; - - constructor(private _model: DeployClusterWizardModel) { - super(localize('deployCluster.AzureSettingsPageTitle', "Azure settings"), - localize('deployCluster.AzureSettingsPageDescription', "Configure the settings to create an Azure Kubernetes Service cluster"), _model.wizard); - } - - public initialize(): void { - const self = this; - const azureSection: SectionInfo = { - title: '', - labelPosition: LabelPosition.Left, - spaceBetweenFields: '5px', - rows: [{ - items: [{ - type: FieldType.Text, - label: localize('deployCluster.SubscriptionField', "Subscription id"), - required: false, - variableName: SubscriptionId_VariableName, - placeHolder: localize('deployCluster.SubscriptionPlaceholder', "Use my default Azure subscription"), - description: localize('deployCluster.SubscriptionDescription', "The default subscription will be used if you leave this field blank.") - }, { - type: FieldType.ReadonlyText, - label: '{0}', - links: [ - { - text: localize('deployCluster.SubscriptionHelpLink', "View available Azure subscriptions"), - url: 'https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade' - } - ] - }] - }, { - items: [{ - type: FieldType.DateTimeText, - label: localize('deployCluster.ResourceGroupName', "New resource group name"), - required: true, - variableName: ResourceGroup_VariableName, - defaultValue: 'mssql-' - }] - }, { - items: [{ - type: FieldType.Options, - label: localize('deployCluster.Location', "Location"), - required: true, - variableName: Location_VariableName, - defaultValue: AzureRegion.eastus, - editable: true, - // The options are not localized because this is an editable dropdown, - // It would cause confusion to user about what value to type in, if they type in the localized value, we don't know how to process. 
- options: [ - AzureRegion.centralus, - AzureRegion.eastus, - AzureRegion.eastus2, - AzureRegion.northcentralus, - AzureRegion.southcentralus, - AzureRegion.westus, - AzureRegion.westus2, - AzureRegion.canadacentral, - AzureRegion.canadaeast - ] - }, { - type: FieldType.ReadonlyText, - label: '{0}', - links: [ - { - text: localize('deployCluster.AzureLocationHelpLink', "View available Azure locations"), - url: 'https://azure.microsoft.com/global-infrastructure/services/?products=kubernetes-service' - } - ] - }] - }, { - items: [{ - type: FieldType.DateTimeText, - label: localize('deployCluster.AksName', "AKS cluster name"), - required: true, - variableName: AksName_VariableName, - defaultValue: 'mssql-', - }] - }, { - items: [ - { - type: FieldType.Number, - label: localize('deployCluster.VMCount', "VM count"), - required: true, - variableName: VMCount_VariableName, - defaultValue: '5', - min: 1, - max: 999 - } - ] - }, { - items: [{ - type: FieldType.Text, - label: localize('deployCluster.VMSize', "VM size"), - required: true, - variableName: VMSize_VariableName, - defaultValue: 'Standard_E8s_v3' - }, { - type: FieldType.ReadonlyText, - label: '{0}', - links: [ - { - text: localize('deployCluster.VMSizeHelpLink', "View available VM sizes"), - url: 'https://docs.microsoft.com/azure/virtual-machines/linux/sizes' - } - ] - }] - }] - }; - this.pageObject.registerContent(async (view: azdata.ModelView) => { - const azureGroup = await createSection({ - sectionInfo: azureSection, - view: view, - onNewDisposableCreated: (disposable: vscode.Disposable): void => { - self.wizard.registerDisposable(disposable); - }, - onNewInputComponentCreated: (name: string, inputComponentInfo: InputComponentInfo): void => { - self.inputComponents[name] = inputComponentInfo; - self._model.inputComponents[name] = inputComponentInfo; - }, - onNewValidatorCreated: (validator: Validator): void => { - self.validators.push(validator); - }, - container: this.wizard.wizardObject, - inputComponents: this._model.inputComponents, - toolsService: this.wizard.toolsService - }); - const formBuilder = view.modelBuilder.formContainer().withFormItems( - [{ - title: '', - component: azureGroup - }], - { - horizontal: false, - componentWidth: '100%' - } - ); - - const form = formBuilder.withLayout({ width: '100%' }).component(); - return view.initializeModel(form); - }); - } - - public override async onEnter(): Promise<void> { - this.wizard.wizardObject.registerNavigationValidator((pcInfo) => { - this.wizard.wizardObject.message = { text: '' }; - if (pcInfo.newPage > pcInfo.lastPage) { - const location = getDropdownComponent(Location_VariableName, this.inputComponents).value; - if (!location) { - this.wizard.wizardObject.message = { - text: MissingRequiredInformationErrorMessage, - level: azdata.window.MessageLevel.Error - }; - } - return !!location; - } else { - return true; - } - }); - } - - public override async onLeave(): Promise<void> { - this.wizard.wizardObject.registerNavigationValidator((pcInfo) => { - return true; - }); - await setModelValues(this.inputComponents, this.wizard.model); - } -} diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/clusterSettingsPage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/clusterSettingsPage.ts deleted file mode 100644 index bb32f6ea88..0000000000 --- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/clusterSettingsPage.ts +++ /dev/null @@ -1,367 +0,0 @@
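
The removed azureSettingsPage above gates forward navigation with registerNavigationValidator: moving ahead requires a location, while going back is always allowed. A minimal sketch of that pattern (MiniWizard is a hypothetical stand-in for the azdata wizard object):

    interface PageChangeInfo { lastPage: number; newPage: number; }
    interface MiniWizard {
      message: { text: string };
      registerNavigationValidator(validator: (info: PageChangeInfo) => boolean): void;
    }

    function gateOnRequiredValue(wizard: MiniWizard, getValue: () => string | undefined): void {
      wizard.registerNavigationValidator((pcInfo: PageChangeInfo) => {
        wizard.message = { text: '' }; // clear any stale error first
        if (pcInfo.newPage > pcInfo.lastPage) {
          const value = getValue();
          if (!value) {
            wizard.message = { text: 'Please fill out the required fields marked with red asterisks.' };
          }
          return !!value; // only forward navigation requires the value
        }
        return true; // navigating back is always allowed
      });
    }
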
-/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import * as azdata from 'azdata'; -import { EOL } from 'os'; -import * as vscode from 'vscode'; -import * as nls from 'vscode-nls'; -import { FieldType, LabelPosition, SectionInfo } from '../../../interfaces'; -import * as localizedConstants from '../../../localizedConstants'; -import { createSection, getInputBoxComponent, getInvalidSQLPasswordMessage, getPasswordMismatchMessage, InputComponent, InputComponentInfo, InputComponents, isValidSQLPassword, setModelValues, Validator } from '../../modelViewUtils'; -import { ResourceTypePage } from '../../resourceTypePage'; -import { ValidationType } from '../../validation/validations'; -import * as VariableNames from '../constants'; -import { AuthenticationMode, DeployClusterWizardModel } from '../deployClusterWizardModel'; -const localize = nls.loadMessageBundle(); - -const ConfirmPasswordName = 'ConfirmPassword'; -const clusterNameFieldDescription = localize('deployCluster.ClusterNameDescription', "The cluster name must consist only of alphanumeric lowercase characters or '-' and must start and end with an alphanumeric character."); - -export class ClusterSettingsPage extends ResourceTypePage { - private inputComponents: InputComponents = {}; - private activeDirectorySection!: azdata.FormComponent; - private formBuilder!: azdata.FormBuilder; - - constructor(private _model: DeployClusterWizardModel) { - super(localize('deployCluster.ClusterSettingsPageTitle', "Cluster settings"), - localize('deployCluster.ClusterSettingsPageDescription', "Configure the SQL Server Big Data Cluster settings"), _model.wizard); - } - - public initialize(): void { - const self = this; - const basicSection: SectionInfo = { - labelPosition: LabelPosition.Left, - title: '', - fields: [ - { - type: FieldType.Text, - label: localize('deployCluster.ClusterName', "Cluster name"), - required: true, - variableName: VariableNames.ClusterName_VariableName, - defaultValue: 'mssql-cluster', - validations: [{ - type: ValidationType.Regex, - regex: new RegExp('^[a-z0-9]$|^[a-z0-9][a-z0-9-]*[a-z0-9]$'), - description: clusterNameFieldDescription - }], - description: clusterNameFieldDescription - }, { - type: FieldType.Text, - label: localize('deployCluster.AdminUsername', "Admin username"), - required: true, - variableName: VariableNames.AdminUserName_VariableName, - defaultValue: 'admin', - description: localize('deployCluster.AdminUsernameDescription', "This username will be used for controller and SQL Server. 
Username for the gateway will be root.") - }, { - type: FieldType.Password, - label: localize('deployCluster.AdminPassword', "Password"), - required: true, - variableName: VariableNames.AdminPassword_VariableName, - defaultValue: '', - description: localize('deployCluster.AdminPasswordDescription', "This password can be used to access the controller, SQL Server and gateway.") - }, { - type: FieldType.Password, - label: localize('deployCluster.ConfirmPassword', "Confirm password"), - required: true, - variableName: ConfirmPasswordName, - defaultValue: '' - }, { - type: FieldType.Options, - label: localize('deployCluster.AuthenticationMode', "Authentication mode"), - variableName: VariableNames.AuthenticationMode_VariableName, - defaultValue: AuthenticationMode.Basic, - options: [ - { - name: AuthenticationMode.Basic, - displayName: localize('deployCluster.AuthenticationMode.Basic', "Basic") - }, - { - name: AuthenticationMode.ActiveDirectory, - displayName: localize('deployCluster.AuthenticationMode.ActiveDirectory', "Active Directory") - - } - ] - } - ] - }; - - const dockerSection: SectionInfo = { - labelPosition: LabelPosition.Left, - collapsed: true, - collapsible: true, - title: localize('deployCluster.DockerSettings', "Docker settings"), - fields: [ - { - type: FieldType.Text, - label: localize('deployCluster.DockerRegistry', "Registry"), - required: true, - variableName: VariableNames.DockerRegistry_VariableName - }, { - type: FieldType.Text, - label: localize('deployCluster.DockerRepository', "Repository"), - required: true, - variableName: VariableNames.DockerRepository_VariableName - }, { - type: FieldType.Text, - label: localize('deployCluster.DockerImageTag', "Image tag"), - required: true, - variableName: VariableNames.DockerImageTag_VariableName - }, { - type: FieldType.Text, - label: localize('deployCluster.DockerUsername', "Username"), - required: false, - variableName: VariableNames.DockerUsername_VariableName - }, { - type: FieldType.Password, - label: localize('deployCluster.DockerPassword', "Password"), - required: false, - variableName: VariableNames.DockerPassword_VariableName - } - ] - }; - - const activeDirectorySection: SectionInfo = { - labelPosition: LabelPosition.Left, - title: localize('deployCluster.ActiveDirectorySettings', "Active Directory settings"), - fields: [ - { - type: FieldType.Text, - label: localize('deployCluster.OuDistinguishedName', "Organizational unit"), - required: true, - variableName: VariableNames.OrganizationalUnitDistinguishedName_VariableName, - description: localize('deployCluster.OuDistinguishedNameDescription', "Distinguished name for the organizational unit. For example: OU=bdc,DC=contoso,DC=com.") - }, { - type: FieldType.Text, - label: localize('deployCluster.DomainControllerFQDNs', "Domain controller FQDNs"), - required: true, - variableName: VariableNames.DomainControllerFQDNs_VariableName, - placeHolder: localize('deployCluster.DomainControllerFQDNsPlaceHolder', "Use comma to separate the values."), - description: localize('deployCluster.DomainControllerFQDNDescription', "Fully qualified domain names for the domain controller. For example: DC1.CONTOSO.COM. 
Use comma to separate multiple FQDNs.") - }, { - type: FieldType.Text, - label: localize('deployCluster.DomainDNSIPAddresses', "Domain DNS IP addresses"), - required: true, - variableName: VariableNames.DomainDNSIPAddresses_VariableName, - placeHolder: localize('deployCluster.DomainDNSIPAddressesPlaceHolder', "Use comma to separate the values."), - description: localize('deployCluster.DomainDNSIPAddressesDescription', "Domain DNS servers' IP Addresses. Use comma to separate multiple IP addresses.") - }, { - type: FieldType.Text, - label: localize('deployCluster.DomainDNSName', "Domain DNS name"), - required: true, - variableName: VariableNames.DomainDNSName_VariableName - }, { - type: FieldType.Text, - label: localizedConstants.realm, - required: false, - variableName: VariableNames.Realm_VariableName, - description: localize('deployCluster.RealmDescription', "If not provided, the domain DNS name will be used as the default value.") - }, { - type: FieldType.Text, - label: localize('deployCluster.ClusterAdmins', "Cluster admin group"), - required: true, - variableName: VariableNames.ClusterAdmins_VariableName, - description: localize('deployCluster.ClusterAdminsDescription', "The Active Directory group for cluster admin.") - }, { - type: FieldType.Text, - label: localize('deployCluster.ClusterUsers', "Cluster users"), - required: true, - variableName: VariableNames.ClusterUsers_VariableName, - placeHolder: localize('deployCluster.ClusterUsersPlaceHolder', "Use comma to separate the values."), - description: localize('deployCluster.ClusterUsersDescription', "The Active Directory users/groups with cluster users role. Use comma to separate multiple users/groups.") - }, { - type: FieldType.Text, - label: localize('deployCluster.DomainServiceAccountUserName', "Service account username"), - required: true, - variableName: VariableNames.DomainServiceAccountUserName_VariableName, - description: localize('deployCluster.DomainServiceAccountUserNameDescription', "Domain service account for Big Data Cluster") - }, { - type: FieldType.Password, - label: localize('deployCluster.DomainServiceAccountPassword', "Service account password"), - required: true, - variableName: VariableNames.DomainServiceAccountPassword_VariableName - }, { - type: FieldType.Text, - label: localize('deployCluster.AppOwners', "App owners"), - required: false, - variableName: VariableNames.AppOwners_VariableName, - placeHolder: localize('deployCluster.AppOwnersPlaceHolder', "Use comma to separate the values."), - description: localize('deployCluster.AppOwnersDescription', "The Active Directory users or groups with app owners role. Use comma to separate multiple users/groups.") - }, { - type: FieldType.Text, - label: localize('deployCluster.AppReaders', "App readers"), - required: false, - variableName: VariableNames.AppReaders_VariableName, - placeHolder: localize('deployCluster.AppReadersPlaceHolder', "Use comma to separate the values."), - description: localize('deployCluster.AppReadersDescription', "The Active Directory users or groups with app readers role. Use comma to separate multiple users/groups.") - }, { - type: FieldType.Text, - label: localize('deployCluster.Subdomain', "Subdomain"), - required: false, - variableName: VariableNames.Subdomain_VariableName, - description: localize('deployCluster.SubdomainDescription', "A unique DNS subdomain to use for this SQL Server Big Data Cluster.
If not provided, the cluster name will be used as the default value.") - }, { - type: FieldType.Text, - label: localize('deployCluster.AccountPrefix', "Account prefix"), - required: false, - variableName: VariableNames.AccountPrefix_VariableName, - description: localize('deployCluster.AccountPrefixDescription', "A unique prefix for AD accounts SQL Server Big Data Cluster will generate. If not provided, the subdomain name will be used as the default value. If a subdomain is not provided, the cluster name will be used as the default value.") - } - ] - }; - this.pageObject.registerContent(async (view: azdata.ModelView) => { - const basicSettingsGroup = await createSection({ - view: view, - container: self.wizard.wizardObject, - inputComponents: this._model.inputComponents, - sectionInfo: basicSection, - onNewDisposableCreated: (disposable: vscode.Disposable): void => { - self.wizard.registerDisposable(disposable); - }, - onNewInputComponentCreated: (name: string, inputComponentInfo: InputComponentInfo): void => { - self.inputComponents[name] = inputComponentInfo; - self._model.inputComponents[name] = inputComponentInfo; - }, - onNewValidatorCreated: (validator: Validator): void => { - self.validators.push(validator); - }, - toolsService: this.wizard.toolsService - }); - const activeDirectorySettingsGroup = await createSection({ - view: view, - container: self.wizard.wizardObject, - inputComponents: this._model.inputComponents, - sectionInfo: activeDirectorySection, - onNewDisposableCreated: (disposable: vscode.Disposable): void => { - self.wizard.registerDisposable(disposable); - }, - onNewInputComponentCreated: (name: string, inputComponentInfo: InputComponentInfo): void => { - self.inputComponents[name] = inputComponentInfo; - self._model.inputComponents[name] = inputComponentInfo; - }, - onNewValidatorCreated: (validator: Validator): void => { - self.validators.push(validator); - }, - toolsService: this.wizard.toolsService - }); - const dockerSettingsGroup = await createSection({ - view: view, - container: self.wizard.wizardObject, - inputComponents: this._model.inputComponents, - sectionInfo: dockerSection, - onNewDisposableCreated: (disposable: vscode.Disposable): void => { - self.wizard.registerDisposable(disposable); - }, - onNewInputComponentCreated: (name: string, inputComponentInfo: InputComponentInfo): void => { - self.inputComponents[name] = inputComponentInfo; - self._model.inputComponents[name] = inputComponentInfo; - }, - onNewValidatorCreated: (validator: Validator): void => { - self.validators.push(validator); - }, - toolsService: this.wizard.toolsService - }); - const basicSettingsFormItem = { title: '', component: basicSettingsGroup }; - const dockerSettingsFormItem = { title: '', component: dockerSettingsGroup }; - this.activeDirectorySection = { title: '', component: activeDirectorySettingsGroup }; - const authModeDropdown = <azdata.DropDownComponent>this.inputComponents[VariableNames.AuthenticationMode_VariableName].component; - this.formBuilder = view.modelBuilder.formContainer().withFormItems( - [basicSettingsFormItem, dockerSettingsFormItem], - { - horizontal: false, - componentWidth: '100%' - } - );
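
The onValueChanged handler registered just below flips the required flags on the Active Directory inputs and swaps the AD section in or out of the form as one unit. A minimal sketch of that toggle (MiniForm and MiniInput are hypothetical stand-ins for the azdata form builder and input components):

    interface MiniInput { required: boolean; }
    interface MiniForm {
      insertFormItem(item: object): void;
      removeFormItem(item: object): void;
    }

    function toggleAdSection(form: MiniForm, adSection: object, adInputs: MiniInput[], isBasicAuthMode: boolean): void {
      // AD-only fields are required exactly when AD authentication is selected.
      adInputs.forEach(input => { input.required = !isBasicAuthMode; });
      if (isBasicAuthMode) {
        form.removeFormItem(adSection); // hide the whole AD section
      } else {
        form.insertFormItem(adSection); // bring it back when AD is selected
      }
    }
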
- this.wizard.registerDisposable(authModeDropdown.onValueChanged(() => { - const isBasicAuthMode = (<azdata.CategoryValue>authModeDropdown.value).name === 'basic'; - getInputBoxComponent(VariableNames.OrganizationalUnitDistinguishedName_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.DomainControllerFQDNs_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.DomainDNSIPAddresses_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.DomainDNSName_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.ClusterAdmins_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.ClusterUsers_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.DomainServiceAccountUserName_VariableName, this.inputComponents).required = !isBasicAuthMode; - getInputBoxComponent(VariableNames.DomainServiceAccountPassword_VariableName, this.inputComponents).required = !isBasicAuthMode; - if (isBasicAuthMode) { - this.formBuilder.removeFormItem(this.activeDirectorySection); - } else { - this.formBuilder.insertFormItem(this.activeDirectorySection); - } - })); - const form = this.formBuilder.withLayout({ width: '100%' }).component(); - return view.initializeModel(form); - }); - } - - public override async onLeave(): Promise<void> { - await setModelValues(this.inputComponents, this.wizard.model); - if (this._model.authenticationMode === AuthenticationMode.ActiveDirectory) { - const variableDNSPrefixMapping: { [s: string]: string } = {}; - variableDNSPrefixMapping[VariableNames.AppServiceProxyDNSName_VariableName] = 'bdc-appproxy'; - variableDNSPrefixMapping[VariableNames.ControllerDNSName_VariableName] = 'bdc-control'; - variableDNSPrefixMapping[VariableNames.GatewayDNSName_VariableName] = 'bdc-gateway'; - variableDNSPrefixMapping[VariableNames.ReadableSecondaryDNSName_VariableName] = 'bdc-sqlread'; - variableDNSPrefixMapping[VariableNames.SQLServerDNSName_VariableName] = 'bdc-sql'; - variableDNSPrefixMapping[VariableNames.ServiceProxyDNSName_VariableName] = 'bdc-proxy'; - - const subdomain = this.wizard.model.getStringValue(VariableNames.Subdomain_VariableName) || this.wizard.model.getStringValue(VariableNames.ClusterName_VariableName); - Object.keys(variableDNSPrefixMapping).forEach((variableName: string) => { - this.wizard.model.setPropertyValue(variableName, `${variableDNSPrefixMapping[variableName]}.${subdomain}.${this.wizard.model.getStringValue(VariableNames.DomainDNSName_VariableName)}`); - }); - } - this.wizard.wizardObject.registerNavigationValidator((pcInfo) => { - return true; - }); - } - - public override async onEnter(): Promise<void> { - getInputBoxComponent(VariableNames.DockerRegistry_VariableName, this.inputComponents).value = this.wizard.model.getStringValue(VariableNames.DockerRegistry_VariableName); - getInputBoxComponent(VariableNames.DockerRepository_VariableName, this.inputComponents).value = this.wizard.model.getStringValue(VariableNames.DockerRepository_VariableName); - getInputBoxComponent(VariableNames.DockerImageTag_VariableName, this.inputComponents).value = this.wizard.model.getStringValue(VariableNames.DockerImageTag_VariableName); - const authModeDropdown = <azdata.DropDownComponent>this.inputComponents[VariableNames.AuthenticationMode_VariableName].component; - if (authModeDropdown) { - authModeDropdown.enabled = this._model.adAuthSupported; - const adAuthSelected = (<azdata.CategoryValue>authModeDropdown.value).name === 'ad'; - if (!this._model.adAuthSupported && adAuthSelected) { - this.formBuilder.removeFormItem(this.activeDirectorySection); - authModeDropdown.value = { - name: AuthenticationMode.Basic, - displayName: localize('deployCluster.AuthenticationMode.Basic', "Basic") - }; - } - - this.wizard.wizardObject.registerNavigationValidator((pcInfo) => {
this.wizard.wizardObject.message = { text: '' }; - if (pcInfo.newPage > pcInfo.lastPage) { - const messages: string[] = []; - const password = getInputBoxComponent(VariableNames.AdminPassword_VariableName, this.inputComponents).value!; - const confirmPassword = getInputBoxComponent(ConfirmPasswordName, this.inputComponents).value!; - if (password !== confirmPassword) { - messages.push(getPasswordMismatchMessage(localize('deployCluster.AdminPasswordField', "Password"))); - } - if (!isValidSQLPassword(password, getInputBoxComponent(VariableNames.AdminUserName_VariableName, this.inputComponents).value!)) { - messages.push(getInvalidSQLPasswordMessage(localize('deployCluster.AdminPasswordField', "Password"))); - } - - this.validators.forEach(validator => { - const result = validator(); - if (!result.valid) { - messages.push(result.message); - } - }); - - if (messages.length > 0) { - this._model.wizard.wizardObject.message = { - text: messages.length === 1 ? messages[0] : localizedConstants.multipleValidationErrors, - description: messages.length === 1 ? undefined : messages.join(EOL), - level: azdata.window.MessageLevel.Error - }; - } - return messages.length === 0; - } - return true; - }); - } - } -} diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/deploymentProfilePage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/deploymentProfilePage.ts deleted file mode 100644 index 3078173038..0000000000 --- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/deploymentProfilePage.ts +++ /dev/null @@ -1,257 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. 
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as nls from 'vscode-nls';
-import { BdcDeploymentType } from '../../../interfaces';
-import { BigDataClusterDeploymentProfile } from '../../../services/bigDataClusterDeploymentProfile';
-import { createFlexContainer, createLabel } from '../../modelViewUtils';
-import { ResourceTypePage } from '../../resourceTypePage';
-import * as VariableNames from '../constants';
-import { DeployClusterWizardModel } from '../deployClusterWizardModel';
-const localize = nls.loadMessageBundle();
-
-const serviceScaleTableTitle = localize('deployCluster.serviceScaleTableTitle', "Service scale settings (Instances)");
-const storageTableTitle = localize('deployCluster.storageTableTitle', "Service storage settings (GB per Instance)");
-const featureTableTitle = localize('deployCluster.featureTableTitle', "Features");
-const YesText = localize('deployCluster.yesText', "Yes");
-const NoText = localize('deployCluster.noText', "No");
-
-export class DeploymentProfilePage extends ResourceTypePage {
-	private _loadingComponent: azdata.LoadingComponent | undefined;
-	private _container: azdata.FlexContainer | undefined;
-
-	constructor(private _model: DeployClusterWizardModel) {
-		super(localize('deployCluster.summaryPageTitle', "Deployment configuration profile"),
-			localize('deployCluster.summaryPageDescription', "Select the target configuration profile"), _model.wizard);
-	}
-
-	public initialize(): void {
-		this.pageObject.registerContent(async (view: azdata.ModelView): Promise<void> => {
-			this._container = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'column' }).component();
-			const hintText = view.modelBuilder.text().withProps({
-				value: localize('deployCluster.ProfileHintText', "Note: The settings of the deployment profile can be customized in later steps.")
-			}).component();
-			const container = createFlexContainer(view, [this._container, hintText], false);
-			this._loadingComponent = view.modelBuilder.loadingComponent().withItem(container).withProps({
-				loading: true,
-				loadingText: localize('deployCluster.loadingProfiles', "Loading profiles"),
-				loadingCompletedText: localize('deployCluster.loadingProfilesCompleted', "Loading profiles completed"),
-				showText: true
-			}).component();
-			let formBuilder = view.modelBuilder.formContainer().withFormItems(
-				[
-					{
-						title: '',
-						component: this._loadingComponent
-					}
-				],
-				{
-					horizontal: false
-				}
-			).withLayout({ width: '100%', height: '100%' });
-			const form = formBuilder.withLayout({ width: '100%' }).component();
-			await view.initializeModel(form);
-			await this.loadProfiles(view);
-		});
-	}
-
-	private setModelValuesByProfile(selectedProfile: BigDataClusterDeploymentProfile): void {
-		this._model.setPropertyValue(VariableNames.DeploymentProfile_VariableName, selectedProfile.profileName);
-		this._model.setPropertyValue(VariableNames.SparkPoolScale_VariableName, selectedProfile.sparkReplicas);
-		this._model.setPropertyValue(VariableNames.DataPoolScale_VariableName, selectedProfile.dataReplicas);
-		this._model.setPropertyValue(VariableNames.HDFSPoolScale_VariableName, selectedProfile.hdfsReplicas);
-		this._model.setPropertyValue(VariableNames.ComputePoolScale_VariableName, selectedProfile.computeReplicas);
-		this._model.setPropertyValue(VariableNames.HDFSNameNodeScale_VariableName, selectedProfile.hdfsNameNodeReplicas);
-		this._model.setPropertyValue(VariableNames.SQLServerScale_VariableName, selectedProfile.sqlServerReplicas);
-		this._model.setPropertyValue(VariableNames.SparkHeadScale_VariableName, selectedProfile.sparkHeadReplicas);
-		this._model.setPropertyValue(VariableNames.ZooKeeperScale_VariableName, selectedProfile.zooKeeperReplicas);
-		this._model.setPropertyValue(VariableNames.ControllerDataStorageSize_VariableName, selectedProfile.controllerDataStorageSize);
-		this._model.setPropertyValue(VariableNames.ControllerLogsStorageSize_VariableName, selectedProfile.controllerLogsStorageSize);
-		this._model.setPropertyValue(VariableNames.SQLServerPort_VariableName, selectedProfile.sqlServerPort);
-		this._model.setPropertyValue(VariableNames.GateWayPort_VariableName, selectedProfile.gatewayPort);
-		this._model.setPropertyValue(VariableNames.ControllerPort_VariableName, selectedProfile.controllerPort);
-		this._model.setPropertyValue(VariableNames.ServiceProxyPort_VariableName, selectedProfile.serviceProxyPort);
-		this._model.setPropertyValue(VariableNames.AppServiceProxyPort_VariableName, selectedProfile.appServiceProxyPort);
-		this._model.setPropertyValue(VariableNames.IncludeSpark_VariableName, selectedProfile.includeSpark);
-		this._model.setPropertyValue(VariableNames.ControllerDataStorageClassName_VariableName, selectedProfile.controllerDataStorageClass);
-		this._model.setPropertyValue(VariableNames.ControllerLogsStorageClassName_VariableName, selectedProfile.controllerLogsStorageClass);
-		this._model.setPropertyValue(VariableNames.ReadableSecondaryPort_VariableName, selectedProfile.sqlServerReadableSecondaryPort);
-		this._model.setPropertyValue(VariableNames.DockerRegistry_VariableName, selectedProfile.registry);
-		this._model.setPropertyValue(VariableNames.DockerRepository_VariableName, selectedProfile.repository);
-		this._model.setPropertyValue(VariableNames.DockerImageTag_VariableName, selectedProfile.imageTag);
-		this._model.adAuthSupported = selectedProfile.activeDirectorySupported;
-		this._model.selectedProfile = selectedProfile;
-	}
-
-	private async loadProfiles(view: azdata.ModelView): Promise<void> {
-		try {
-			const profiles = await this.wizard.azdataService.getDeploymentProfiles(this._model.deploymentType);
-			const radioButtonGroup = this.createRadioButtonGroup(view, profiles);
-			const serviceScaleTable = this.createServiceScaleTable(view, profiles);
-			const storageTable = this.createStorageTable(view, profiles);
-			const featuresTable = this.createFeaturesTable(view, profiles);
-			this._container!.addItem(createLabel(view, { text: localize('deployCluster.profileRadioGroupLabel', "Deployment configuration profile") }), {
-				CSSStyles: { 'margin-bottom': '5px' }
-			});
-			this._container!.addItem(radioButtonGroup, {
-				CSSStyles: { 'margin-bottom': '20px' }
-			});
-			this._container!.addItems([
-				this.createTableGroup(view, serviceScaleTableTitle, serviceScaleTable),
-				this.createTableGroup(view, storageTableTitle, storageTable),
-				this.createTableGroup(view, featureTableTitle, featuresTable)
-			], {
-				CSSStyles: { 'margin-bottom': '10px' }
-			});
-			this._loadingComponent!.loading = false;
-		} catch (error) {
-			this.wizard.wizardObject.message = {
-				level: azdata.window.MessageLevel.Error,
-				text: localize('deployCluster.loadProfileFailed', "Failed to load the deployment profiles: {0}", error.message)
-			};
-			this._loadingComponent!.loading = false;
-		}
-	}
-
-	private createRadioButtonGroup(view: azdata.ModelView, profiles: BigDataClusterDeploymentProfile[]): azdata.FlexContainer {
-		const defaultProfile: string = this.getDefaultProfile();
-		const groupName = 'profileGroup';
-		const radioButtons = profiles.map(profile => {
-			const checked = profile.profileName === defaultProfile;
-			const radioButton = view.modelBuilder.radioButton().withProps({
-				label: profile.profileName,
-				checked: checked,
-				name: groupName
-			}).component();
-			if (checked) {
-				this.setModelValuesByProfile(profile);
-				radioButton.focus();
-			}
-			this.wizard.registerDisposable(radioButton.onDidClick(() => {
-				this.wizard.wizardObject.message = { text: '' };
-				this.setModelValuesByProfile(profile);
-			}));
-			return radioButton;
-		});
-		return view.modelBuilder.flexContainer().withLayout({ flexFlow: 'row' }).withItems(radioButtons, { flex: '0 0 auto', CSSStyles: { 'margin-right': '20px' } }).component();
-	}
-
-	private createServiceScaleTable(view: azdata.ModelView, profiles: BigDataClusterDeploymentProfile[]): azdata.TableComponent {
-		const data = [
-			[localize('deployCluster.masterPoolLabel', "SQL Server Master"), ...profiles.map(profile => profile.sqlServerReplicas.toString())],
-			[localize('deployCluster.computePoolLable', "Compute"), ...profiles.map(profile => profile.computeReplicas.toString())],
-			[localize('deployCluster.dataPoolLabel', "Data"), ...profiles.map(profile => profile.dataReplicas.toString())],
-			[localize('deployCluster.hdfsLabel', "HDFS + Spark"), ...profiles.map(profile => profile.hdfsReplicas.toString())]
-		];
-
-		return view.modelBuilder.table().withProps({
-			columns: [this.createDescriptionColumn(localize('deployCluster.ServiceName', "Service")), ...this.createProfileColumns(profiles)],
-			data: data,
-			title: serviceScaleTableTitle,
-			ariaLabel: serviceScaleTableTitle,
-			height: 140,
-			width: 200 + 150 * profiles.length
-		}).component();
-	}
-
-	private createStorageTable(view: azdata.ModelView, profiles: BigDataClusterDeploymentProfile[]): azdata.TableComponent {
-		const data = [
-			[localize('deployCluster.dataStorageType', "Data"), ...profiles.map(profile => profile.controllerDataStorageSize.toString())],
-			[localize('deployCluster.logsStorageType', "Logs"), ...profiles.map(profile => profile.controllerLogsStorageSize.toString())]
-		];
-		return view.modelBuilder.table().withProps({
-			columns: [this.createDescriptionColumn(localize('deployCluster.StorageType', "Storage type")), ...this.createProfileColumns(profiles)],
-			data: data,
-			title: storageTableTitle,
-			ariaLabel: storageTableTitle,
-			height: 80,
-			width: 200 + 150 * profiles.length
-		}).component();
-	}
-
-	private createFeaturesTable(view: azdata.ModelView, profiles: BigDataClusterDeploymentProfile[]): azdata.TableComponent {
-		const data = [
-			[localize('deployCluster.basicAuthentication', "Basic authentication"), ...profiles.map(profile => YesText)],
-		];
-		if (profiles.findIndex(profile => profile.activeDirectorySupported) !== -1) {
-			data.push([localize('deployCluster.activeDirectoryAuthentication', "Active Directory authentication"), ...profiles.map(profile => profile.activeDirectorySupported ? YesText : NoText)]);
-		}
-
-		if (profiles.findIndex(profile => profile.sqlServerReplicas > 1) !== -1) {
-			data.push([localize('deployCluster.hadr', "High Availability"), ...profiles.map(profile => profile.sqlServerReplicas > 1 ? YesText : NoText)]);
-		}
-
-		return view.modelBuilder.table().withProps({
-			columns: [this.createDescriptionColumn(localize('deployCluster.featureText', "Feature")), ...this.createProfileColumns(profiles)],
-			data: data,
-			title: featureTableTitle,
-			ariaLabel: featureTableTitle,
-			height: 30 + data.length * 25,
-			width: 200 + 150 * profiles.length
-		}).component();
-	}
-
-	private createDescriptionColumn(header: string): azdata.TableColumn {
-		return {
-			value: header,
-			width: 150
-		};
-	}
-
-	private createProfileColumns(profiles: BigDataClusterDeploymentProfile[]): azdata.TableColumn[] {
-		return profiles.map(profile => {
-			return {
-				value: profile.profileName,
-				width: 100
-			};
-		});
-	}
-	private createTableGroup(view: azdata.ModelView, title: string, table: azdata.TableComponent): azdata.FlexContainer {
-		return view.modelBuilder.flexContainer()
-			.withItems([createLabel(view, { text: title }), table], { CSSStyles: { 'margin-bottom': '5px' } })
-			.withLayout({ flexFlow: 'column' })
-			.component();
-	}
-
-	public override async onEnter(): Promise<void> {
-		this.wizard.wizardObject.registerNavigationValidator((pcInfo) => {
-			this.wizard.wizardObject.message = { text: '' };
-			if (pcInfo.newPage > pcInfo.lastPage) {
-				const isValid = this.wizard.model.getStringValue(VariableNames.DeploymentProfile_VariableName) !== undefined;
-				if (!isValid) {
-					this.wizard.wizardObject.message = {
-						text: localize('deployCluster.ProfileNotSelectedError', "Please select a deployment profile."),
-						level: azdata.window.MessageLevel.Error
-					};
-				}
-				return isValid;
-			}
-			return true;
-		});
-	}
-
-	public override async onLeave(): Promise<void> {
-		this.wizard.wizardObject.registerNavigationValidator((pcInfo) => {
-			return true;
-		});
-	}
-
-	private getDefaultProfile(): string {
-		switch (this._model.deploymentType) {
-			case BdcDeploymentType.NewAKS:
-			case BdcDeploymentType.ExistingAKS:
-				return 'aks-dev-test';
-			case BdcDeploymentType.ExistingKubeAdm:
-				return 'kubeadm-dev-test';
-			case BdcDeploymentType.ExistingARO:
-				return 'aro-dev-test';
-			case BdcDeploymentType.ExistingOpenShift:
-				return 'openshift-dev-test';
-			default:
-				throw new Error(`Unknown deployment type: ${this._model.deploymentType}`);
-		}
-	}
-}
diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/serviceSettingsPage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/serviceSettingsPage.ts
deleted file mode 100644
index 0794dd767f..0000000000
--- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/serviceSettingsPage.ts
+++ /dev/null
@@ -1,448 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-import * as azdata from 'azdata';
-import * as vscode from 'vscode';
-import * as nls from 'vscode-nls';
-import { FieldType, SectionInfo } from '../../../interfaces';
-import { createFlexContainer, createGroupContainer, createLabel, createNumberInputBoxInputInfo, createSection, createInputBoxInputInfo, getCheckboxComponent, getDropdownComponent, getInputBoxComponent, InputComponentInfo, InputComponents, setModelValues, Validator, InputComponent } from '../../modelViewUtils';
-import { ResourceTypePage } from '../../resourceTypePage';
-import * as VariableNames from '../constants';
-import { AuthenticationMode, DeployClusterWizardModel } from '../deployClusterWizardModel';
-const localize = nls.loadMessageBundle();
-
-const NumberInputWidth = '100px';
-const inputWidth = '180px';
-const labelWidth = '200px';
-
-export class ServiceSettingsPage extends ResourceTypePage {
-	private inputComponents: InputComponents = {};
-	private endpointHeaderRow!: azdata.FlexContainer;
-	private dnsColumnHeader!: azdata.TextComponent;
-	private portColumnHeader!: azdata.TextComponent;
-	private controllerDNSInput!: azdata.InputBoxComponent;
-	private controllerPortInput!: azdata.InputBoxComponent;
-	private controllerEndpointRow!: azdata.FlexContainer;
-	private sqlServerDNSInput!: azdata.InputBoxComponent;
-	private sqlServerEndpointRow!: azdata.FlexContainer;
-	private sqlServerPortInput!: azdata.InputBoxComponent;
-	private gatewayDNSInput!: azdata.InputBoxComponent;
-	private gatewayPortInput!: azdata.InputBoxComponent;
-	private gatewayEndpointRow!: azdata.FlexContainer;
-	private serviceProxyDNSInput!: azdata.InputBoxComponent;
-	private serviceProxyPortInput!: azdata.InputBoxComponent;
-	private serviceProxyEndpointRow!: azdata.FlexContainer;
-	private appServiceProxyDNSInput!: azdata.InputBoxComponent;
-	private appServiceProxyPortInput!: azdata.InputBoxComponent;
-	private appServiceProxyEndpointRow!: azdata.FlexContainer;
-	private readableSecondaryDNSInput!: azdata.InputBoxComponent;
-	private readableSecondaryPortInput!: azdata.InputBoxComponent;
-	private readableSecondaryEndpointRow!: azdata.FlexContainer;
-	private endpointNameColumnHeader!: azdata.TextComponent;
-	private controllerNameLabel!: azdata.TextComponent;
-	private SqlServerNameLabel!: azdata.TextComponent;
-	private gatewayNameLabel!: azdata.TextComponent;
-	private serviceProxyNameLabel!: azdata.TextComponent;
-	private appServiceProxyNameLabel!: azdata.TextComponent;
-	private readableSecondaryNameLabel!: azdata.TextComponent;
-	private endpointSection!: azdata.GroupContainer;
-
-	constructor(private _model: DeployClusterWizardModel) {
-		super(localize('deployCluster.ServiceSettingsPageTitle', "Service settings"), '', _model.wizard);
-	}
-	public initialize(): void {
-		const self = this;
-		const scaleSectionInfo: SectionInfo = {
-			title: localize('deployCluster.scaleSectionTitle', "Scale settings"),
-			labelWidth: labelWidth,
-			inputWidth: NumberInputWidth,
-			spaceBetweenFields: '40px',
-			rows: [{
-				items: [{
-					type: FieldType.Options,
-					label: localize('deployCluster.MasterSqlServerInstances', "SQL Server master instances"),
-					options: ['1', '3', '4', '5', '6', '7', '8', '9'],
-					defaultValue: '1',
-					variableName: VariableNames.SQLServerScale_VariableName,
-				}, {
-					type: FieldType.Number,
-					label: localize('deployCluster.ComputePoolInstances', "Compute pool instances"),
-					min: 1,
-					max: 100,
-					defaultValue: '1',
-					required: true,
-					variableName: VariableNames.ComputePoolScale_VariableName,
-				}]
-			}, {
-				items: [{
-					type: FieldType.Number,
-					label: localize('deployCluster.DataPoolInstances', "Data pool instances"),
-					min: 1,
-					max: 100,
-					defaultValue: '1',
-					required: true,
-					variableName: VariableNames.DataPoolScale_VariableName,
-				}, {
-					type: FieldType.Number,
-					label: localize('deployCluster.SparkPoolInstances', "Spark pool instances"),
-					min: 0,
-					max: 100,
-					defaultValue: '0',
-					required: true,
-					variableName: VariableNames.SparkPoolScale_VariableName
-				}]
-			}, {
-				items: [
-					{
-						type: FieldType.Number,
-						label: localize('deployCluster.StoragePoolInstances', "Storage pool (HDFS) instances"),
-						min: 1,
-						max: 100,
-						defaultValue: '1',
-						required: true,
-						variableName: VariableNames.HDFSPoolScale_VariableName
-					}, {
-						type: FieldType.Checkbox,
-						label: localize('deployCluster.IncludeSparkInStoragePool', "Include Spark in storage pool"),
-						defaultValue: 'true',
-						variableName: VariableNames.IncludeSpark_VariableName,
-						required: false
-					}
-				]
-			}
-			]
-		};
-
-		this.pageObject.registerContent(async (view: azdata.ModelView) => {
-			const createSectionFunc = async (sectionInfo: SectionInfo): Promise<azdata.GroupContainer> => {
-				return await createSection({
-					view: view,
-					container: this.wizard.wizardObject,
-					inputComponents: this._model.inputComponents,
-					sectionInfo: sectionInfo,
-					onNewDisposableCreated: (disposable: vscode.Disposable): void => {
-						self.wizard.registerDisposable(disposable);
-					},
-					onNewInputComponentCreated: (name: string, inputComponentInfo: InputComponentInfo<InputComponent>): void => {
-						self.onNewInputComponentCreated(name, inputComponentInfo);
-					},
-					onNewValidatorCreated: (validator: Validator): void => {
-					},
-					toolsService: this.wizard.toolsService
-				});
-			};
-			const scaleSection = await createSectionFunc(scaleSectionInfo);
-			this.endpointSection = this.createEndpointSection(view);
-			const storageSection = this.createStorageSection(view);
-
-			this.handleSparkSettingEvents();
-			const form = view.modelBuilder.formContainer().withFormItems([
-				{
-					title: '',
-					component: scaleSection
-				}, {
-					title: '',
-					component: this.endpointSection
-				}, {
-					title: '',
-					component: storageSection
-				}
-			]).withLayout({ width: '100%' }).component();
-			return view.initializeModel(form);
-		});
-	}
-
-	private onNewInputComponentCreated(name: string, inputComponentInfo: InputComponentInfo<InputComponent>) {
-		this.inputComponents[name] = inputComponentInfo;
-		this._model.inputComponents[name] = inputComponentInfo;
-	}
-
-	private handleSparkSettingEvents(): void {
-		const sparkInstanceInput = getInputBoxComponent(VariableNames.SparkPoolScale_VariableName, this.inputComponents);
-		const includeSparkCheckbox = getCheckboxComponent(VariableNames.IncludeSpark_VariableName, this.inputComponents);
-		this.wizard.registerDisposable(includeSparkCheckbox.onChanged(() => {
-			if (!includeSparkCheckbox.checked && !(sparkInstanceInput.value && Number.parseInt(sparkInstanceInput.value) > 0)) {
-				sparkInstanceInput.value = '1';
-			}
-		}));
-	}
-
-	private createEndpointSection(view: azdata.ModelView): azdata.GroupContainer {
-		this.endpointNameColumnHeader = createLabel(view, { text: '', width: labelWidth });
-		this.dnsColumnHeader = createLabel(view, { text: localize('deployCluster.DNSNameHeader', "DNS name"), width: inputWidth });
-		this.portColumnHeader = createLabel(view, { text: localize('deployCluster.PortHeader', "Port"), width: NumberInputWidth });
-		this.endpointHeaderRow = createFlexContainer(view, [this.endpointNameColumnHeader, this.dnsColumnHeader, this.portColumnHeader]);
-
-		this.controllerNameLabel = createLabel(view, { text: localize('deployCluster.ControllerText', "Controller"), width: labelWidth, required: true });
-		const controllerDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ControllerDNSName', "Controller DNS name"), required: false, width: inputWidth });
-		this.controllerDNSInput = controllerDNSInput.component;
-		const controllerPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ControllerPortName', "Controller port"), required: true, width: NumberInputWidth, min: 1 });
-		this.controllerPortInput = controllerPortInput.component;
-		this.controllerEndpointRow = createFlexContainer(view, [this.controllerNameLabel, this.controllerDNSInput, this.controllerPortInput]);
-		this.onNewInputComponentCreated(VariableNames.ControllerDNSName_VariableName, controllerDNSInput);
-		this.onNewInputComponentCreated(VariableNames.ControllerPort_VariableName, controllerPortInput);
-
-		this.SqlServerNameLabel = createLabel(view, { text: localize('deployCluster.MasterSqlText', "SQL Server Master"), width: labelWidth, required: true });
-		const sqlServerDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.MasterSQLServerDNSName', "SQL Server Master DNS name"), required: false, width: inputWidth });
-		this.sqlServerDNSInput = sqlServerDNSInput.component;
-		const sqlServerPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.MasterSQLServerPortName', "SQL Server Master port"), required: true, width: NumberInputWidth, min: 1 });
-		this.sqlServerPortInput = sqlServerPortInput.component;
-		this.sqlServerEndpointRow = createFlexContainer(view, [this.SqlServerNameLabel, this.sqlServerDNSInput, this.sqlServerPortInput]);
-		this.onNewInputComponentCreated(VariableNames.SQLServerDNSName_VariableName, sqlServerDNSInput);
-		this.onNewInputComponentCreated(VariableNames.SQLServerPort_VariableName, sqlServerPortInput);
-
-		this.gatewayNameLabel = createLabel(view, { text: localize('deployCluster.GatewayText', "Gateway"), width: labelWidth, required: true });
-		const gatewayDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.GatewayDNSName', "Gateway DNS name"), required: false, width: inputWidth });
-		this.gatewayDNSInput = gatewayDNSInput.component;
-		const gatewayPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.GatewayPortName', "Gateway port"), required: true, width: NumberInputWidth, min: 1 });
-		this.gatewayPortInput = gatewayPortInput.component;
-		this.gatewayEndpointRow = createFlexContainer(view, [this.gatewayNameLabel, this.gatewayDNSInput, this.gatewayPortInput]);
-		this.onNewInputComponentCreated(VariableNames.GatewayDNSName_VariableName, gatewayDNSInput);
-		this.onNewInputComponentCreated(VariableNames.GateWayPort_VariableName, gatewayPortInput);
-
-		this.serviceProxyNameLabel = createLabel(view, { text: localize('deployCluster.ServiceProxyText', "Management proxy"), width: labelWidth, required: true });
-		const serviceProxyDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ServiceProxyDNSName', "Management proxy DNS name"), required: false, width: inputWidth });
-		this.serviceProxyDNSInput = serviceProxyDNSInput.component;
-		const serviceProxyPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ServiceProxyPortName', "Management proxy port"), required: true, width: NumberInputWidth, min: 1 });
-		this.serviceProxyPortInput = serviceProxyPortInput.component;
-		this.serviceProxyEndpointRow = createFlexContainer(view, [this.serviceProxyNameLabel, this.serviceProxyDNSInput, this.serviceProxyPortInput]);
-		this.onNewInputComponentCreated(VariableNames.ServiceProxyDNSName_VariableName, serviceProxyDNSInput);
-		this.onNewInputComponentCreated(VariableNames.ServiceProxyPort_VariableName, serviceProxyPortInput);
-
-		this.appServiceProxyNameLabel = createLabel(view, { text: localize('deployCluster.AppServiceProxyText', "Application proxy"), width: labelWidth, required: true });
-		const appServiceProxyDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.AppServiceProxyDNSName', "Application proxy DNS name"), required: false, width: inputWidth });
-		this.appServiceProxyDNSInput = appServiceProxyDNSInput.component;
-		const appServiceProxyPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.AppServiceProxyPortName', "Application proxy port"), required: true, width: NumberInputWidth, min: 1 });
-		this.appServiceProxyPortInput = appServiceProxyPortInput.component;
-		this.appServiceProxyEndpointRow = createFlexContainer(view, [this.appServiceProxyNameLabel, this.appServiceProxyDNSInput, this.appServiceProxyPortInput]);
-		this.onNewInputComponentCreated(VariableNames.AppServiceProxyDNSName_VariableName, appServiceProxyDNSInput);
-		this.onNewInputComponentCreated(VariableNames.AppServiceProxyPort_VariableName, appServiceProxyPortInput);
-
-		this.readableSecondaryNameLabel = createLabel(view, { text: localize('deployCluster.ReadableSecondaryText', "Readable secondary"), width: labelWidth, required: true });
-		const readableSecondaryDNSInput = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ReadableSecondaryDNSName', "Readable secondary DNS name"), required: false, width: inputWidth });
-		this.readableSecondaryDNSInput = readableSecondaryDNSInput.component;
-		const readableSecondaryPortInput = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.ReadableSecondaryPortName', "Readable secondary port"), required: false, width: NumberInputWidth, min: 1 });
-		this.readableSecondaryPortInput = readableSecondaryPortInput.component;
-		this.readableSecondaryEndpointRow = createFlexContainer(view, [this.readableSecondaryNameLabel, this.readableSecondaryDNSInput, this.readableSecondaryPortInput]);
-		this.onNewInputComponentCreated(VariableNames.ReadableSecondaryDNSName_VariableName, readableSecondaryDNSInput);
-		this.onNewInputComponentCreated(VariableNames.ReadableSecondaryPort_VariableName, readableSecondaryPortInput);
-
-		return createGroupContainer(view, [this.endpointHeaderRow, this.controllerEndpointRow, this.sqlServerEndpointRow, this.gatewayEndpointRow, this.serviceProxyEndpointRow, this.appServiceProxyEndpointRow, this.readableSecondaryEndpointRow], {
-			header: localize('deployCluster.EndpointSettings', "Endpoint settings"),
-			collapsible: true
-		});
-	}
-
-	private createStorageSection(view: azdata.ModelView): azdata.GroupContainer {
-		const hintTextForStorageFields = localize('deployCluster.storageFieldTooltip', "Use controller settings");
-		const controllerLabel = createLabel(view,
-			{
-				text: localize('deployCluster.ControllerText', "Controller"),
-				width: inputWidth,
-				required: true,
-				description: localize('deployCluster.AdvancedStorageDescription', "By default Controller storage settings will be applied to other services as well, you can expand the advanced storage settings to configure storage for other services.")
-			});
-		const controllerDataStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.controllerDataStorageClass', "Controller's data storage class"), width: inputWidth, required: true });
-		const controllerDataStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.controllerDataStorageClaimSize', "Controller's data storage claim size (Gigabytes)"), width: inputWidth, required: true, min: 1 });
-		const controllerLogsStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.controllerLogsStorageClass', "Controller's logs storage class"), width: inputWidth, required: true });
-		const controllerLogsStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.controllerLogsStorageClaimSize', "Controller's logs storage claim size (Gigabytes)"), width: inputWidth, required: true, min: 1 });
-
-		const storagePoolLabel = createLabel(view,
-			{
-				text: localize('deployCluster.StoragePool', "Storage pool (HDFS)"),
-				width: inputWidth,
-				required: false
-			});
-
-		const storagePoolDataStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.storagePoolDataStorageClass', "Storage pool's data storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const storagePoolDataStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.storagePoolDataStorageClaimSize', "Storage pool's data storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-		const storagePoolLogsStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.storagePoolLogsStorageClass', "Storage pool's logs storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const storagePoolLogsStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.storagePoolLogsStorageClaimSize', "Storage pool's logs storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-
-		const dataPoolLabel = createLabel(view,
-			{
-				text: localize('deployCluster.DataPool', "Data pool"),
-				width: inputWidth,
-				required: false
-			});
-		const dataPoolDataStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.dataPoolDataStorageClass', "Data pool's data storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const dataPoolDataStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.dataPoolDataStorageClaimSize', "Data pool's data storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-		const dataPoolLogsStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.dataPoolLogsStorageClass', "Data pool's logs storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const dataPoolLogsStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.dataPoolLogsStorageClaimSize', "Data pool's logs storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-
-
-		const sqlServerMasterLabel = createLabel(view,
-			{
-				text: localize('deployCluster.MasterSqlText', "SQL Server Master"),
-				width: inputWidth,
-				required: false
-			});
-
-		const sqlServerMasterDataStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.sqlServerMasterDataStorageClass', "SQL Server master's data storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const sqlServerMasterDataStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.sqlServerMasterDataStorageClaimSize', "SQL Server master's data storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-		const sqlServerMasterLogsStorageClassInputInfo = createInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.sqlServerMasterLogsStorageClass', "SQL Server master's logs storage class"), width: inputWidth, required: false, placeHolder: hintTextForStorageFields });
-		const sqlServerMasterLogsStorageClaimSizeInputInfo = createNumberInputBoxInputInfo(view, { ariaLabel: localize('deployCluster.sqlServerMasterLogsStorageClaimSize', "SQL Server master's logs storage claim size (Gigabytes)"), width: inputWidth, required: false, min: 1, placeHolder: hintTextForStorageFields });
-
-		this.onNewInputComponentCreated(VariableNames.ControllerDataStorageClassName_VariableName, controllerDataStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.ControllerDataStorageSize_VariableName, controllerDataStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.ControllerLogsStorageClassName_VariableName, controllerLogsStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.ControllerLogsStorageSize_VariableName, controllerLogsStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.HDFSDataStorageClassName_VariableName, storagePoolDataStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.HDFSDataStorageSize_VariableName, storagePoolDataStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.HDFSLogsStorageClassName_VariableName, storagePoolLogsStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.HDFSLogsStorageSize_VariableName, storagePoolLogsStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.DataPoolDataStorageClassName_VariableName, dataPoolDataStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.DataPoolDataStorageSize_VariableName, dataPoolDataStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.DataPoolLogsStorageClassName_VariableName, dataPoolLogsStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.DataPoolLogsStorageSize_VariableName, dataPoolLogsStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.SQLServerDataStorageClassName_VariableName, sqlServerMasterDataStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.SQLServerDataStorageSize_VariableName, sqlServerMasterDataStorageClaimSizeInputInfo);
-		this.onNewInputComponentCreated(VariableNames.SQLServerLogsStorageClassName_VariableName, sqlServerMasterLogsStorageClassInputInfo);
-		this.onNewInputComponentCreated(VariableNames.SQLServerLogsStorageSize_VariableName, sqlServerMasterLogsStorageClaimSizeInputInfo);
-
-		const storageSettingTable = view.modelBuilder.declarativeTable()
-			.withProps(
-				{
-					columns: [
-						this.createStorageSettingColumn(localize('deployCluster.ServiceName', "Service name"), false),
-						this.createStorageSettingColumn(localize('deployCluster.DataStorageClassName', "Storage class for data")),
-						this.createStorageSettingColumn(localize('deployCluster.DataClaimSize', "Claim size for data (GB)")),
-						this.createStorageSettingColumn(localize('deployCluster.LogStorageClassName', "Storage class for logs")),
-						this.createStorageSettingColumn(localize('deployCluster.LogsClaimSize', "Claim size for logs (GB)"))
-					],
-					dataValues: [
-						[{ value: controllerLabel }, { value: controllerDataStorageClassInputInfo.component }, { value: controllerDataStorageClaimSizeInputInfo.component }, { value: controllerLogsStorageClassInputInfo.component }, { value: controllerLogsStorageClaimSizeInputInfo.component }],
-						[{ value: storagePoolLabel }, { value: storagePoolDataStorageClassInputInfo.component }, { value: storagePoolDataStorageClaimSizeInputInfo.component }, { value: storagePoolLogsStorageClassInputInfo.component }, { value: storagePoolLogsStorageClaimSizeInputInfo.component }],
-						[{ value: dataPoolLabel }, { value: dataPoolDataStorageClassInputInfo.component }, { value: dataPoolDataStorageClaimSizeInputInfo.component }, { value: dataPoolLogsStorageClassInputInfo.component }, { value: dataPoolLogsStorageClaimSizeInputInfo.component }],
-						[{ value: sqlServerMasterLabel }, { value: sqlServerMasterDataStorageClassInputInfo.component }, { value: sqlServerMasterDataStorageClaimSizeInputInfo.component }, { value: sqlServerMasterLogsStorageClassInputInfo.component }, { value: sqlServerMasterLogsStorageClaimSizeInputInfo.component }]
-					],
-					ariaLabel: localize('deployCluster.StorageSettings', "Storage settings")
-				})
-			.component();
-		return createGroupContainer(view, [storageSettingTable], {
-			header: localize('deployCluster.StorageSectionTitle', "Storage settings"),
-			collapsible: true,
-			collapsed: false
-		});
-	}
-
-	private createStorageSettingColumn(title: string, showText: boolean = true): azdata.DeclarativeTableColumn {
-		return {
-			displayName: showText ? title : '',
-			ariaLabel: title,
-			valueType: azdata.DeclarativeDataType.component,
-			isReadOnly: true,
-			width: inputWidth,
-			headerCssStyles: {
-				'border': 'none',
-				'font-weight': 'inherit'
-			},
-			rowCssStyles: {
-				'border': 'none'
-			}
-		};
-	}
-
-	public override async onEnter(): Promise<void> {
-		this.setInputBoxValue(VariableNames.ComputePoolScale_VariableName);
-		this.setInputBoxValue(VariableNames.DataPoolScale_VariableName);
-		this.setInputBoxValue(VariableNames.HDFSPoolScale_VariableName);
-		this.setInputBoxValue(VariableNames.SparkPoolScale_VariableName);
-		this.setCheckboxValue(VariableNames.IncludeSpark_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerPort_VariableName);
-		this.setInputBoxValue(VariableNames.SQLServerPort_VariableName);
-		this.setInputBoxValue(VariableNames.GateWayPort_VariableName);
-		this.setInputBoxValue(VariableNames.ServiceProxyPort_VariableName);
-		this.setInputBoxValue(VariableNames.AppServiceProxyPort_VariableName);
-		this.setInputBoxValue(VariableNames.ReadableSecondaryPort_VariableName);
-		this.setInputBoxValue(VariableNames.GatewayDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.AppServiceProxyDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.SQLServerDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.ReadableSecondaryDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.ServiceProxyDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerDNSName_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerDataStorageClassName_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerDataStorageSize_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerLogsStorageClassName_VariableName);
-		this.setInputBoxValue(VariableNames.ControllerLogsStorageSize_VariableName);
-		this.endpointHeaderRow.clearItems();
-		const adAuth = this._model.authenticationMode === AuthenticationMode.ActiveDirectory;
-		const sqlServerScale = this.wizard.model.getIntegerValue(VariableNames.SQLServerScale_VariableName);
-
-		this.endpointSection.collapsed = !adAuth;
-		if (adAuth) {
-			this.endpointHeaderRow.addItems([this.endpointNameColumnHeader, this.dnsColumnHeader, this.portColumnHeader]);
-		} else {
-			this.endpointHeaderRow.addItems([this.endpointNameColumnHeader, this.portColumnHeader]);
-		}
-
-		getInputBoxComponent(VariableNames.ControllerDNSName_VariableName, this.inputComponents).required = adAuth;
-		getInputBoxComponent(VariableNames.GatewayDNSName_VariableName, this.inputComponents).required = adAuth;
-		getInputBoxComponent(VariableNames.AppServiceProxyDNSName_VariableName, this.inputComponents).required = adAuth;
-		getInputBoxComponent(VariableNames.ServiceProxyDNSName_VariableName, this.inputComponents).required = adAuth;
-		getInputBoxComponent(VariableNames.SQLServerDNSName_VariableName, this.inputComponents).required = adAuth;
-		getInputBoxComponent(VariableNames.ReadableSecondaryDNSName_VariableName, this.inputComponents).required = adAuth && sqlServerScale > 1;
-		getInputBoxComponent(VariableNames.ReadableSecondaryPort_VariableName, this.inputComponents).required = sqlServerScale > 1;
-		this.loadEndpointRow(this.controllerEndpointRow, this.controllerNameLabel, this.controllerDNSInput, this.controllerPortInput);
-		this.loadEndpointRow(this.gatewayEndpointRow, this.gatewayNameLabel, this.gatewayDNSInput, this.gatewayPortInput);
-		this.loadEndpointRow(this.sqlServerEndpointRow, this.SqlServerNameLabel, this.sqlServerDNSInput, this.sqlServerPortInput);
-		this.loadEndpointRow(this.appServiceProxyEndpointRow, this.appServiceProxyNameLabel, this.appServiceProxyDNSInput, this.appServiceProxyPortInput);
-		this.loadEndpointRow(this.serviceProxyEndpointRow, this.serviceProxyNameLabel, this.serviceProxyDNSInput, this.serviceProxyPortInput);
-		const sqlServerScaleDropdown = getDropdownComponent(VariableNames.SQLServerScale_VariableName, this.inputComponents);
-		if (sqlServerScale > 1) {
-			sqlServerScaleDropdown.values = ['3', '4', '5', '6', '7', '8', '9'];
-			this.loadEndpointRow(this.readableSecondaryEndpointRow, this.readableSecondaryNameLabel, this.readableSecondaryDNSInput, this.readableSecondaryPortInput);
-		} else {
-			this.readableSecondaryEndpointRow.clearItems();
-			sqlServerScaleDropdown.values = ['1'];
-		}
-		sqlServerScaleDropdown.value = sqlServerScale.toString();
-
-		this.wizard.wizardObject.registerNavigationValidator((pcInfo) => {
-			this.wizard.wizardObject.message = { text: '' };
-			if (pcInfo.newPage > pcInfo.lastPage) {
-				const sparkEnabled = Number.parseInt(getInputBoxComponent(VariableNames.SparkPoolScale_VariableName, this.inputComponents).value!) !== 0
-					|| getCheckboxComponent(VariableNames.IncludeSpark_VariableName, this.inputComponents).checked!;
-
-				let errorMessage: string | undefined;
-				if (!sparkEnabled) {
-					errorMessage = localize('deployCluster.SparkMustBeIncluded', "Invalid Spark configuration, you must check the 'Include Spark' checkbox or set the 'Spark pool instances' to at least 1.");
-				}
-				if (errorMessage) {
-					this.wizard.wizardObject.message = {
-						text: errorMessage,
-						level: azdata.window.MessageLevel.Error
-					};
-				}
-				return sparkEnabled;
-			}
-			return true;
-		});
-	}
-
-	public override async onLeave(): Promise<void> {
-		await setModelValues(this.inputComponents, this.wizard.model);
-		this.wizard.wizardObject.registerNavigationValidator((pcInfo) => {
-			return true;
-		});
-	}
-
-	private setInputBoxValue(variableName: string): void {
-		getInputBoxComponent(variableName, this.inputComponents).value = this.wizard.model.getStringValue(variableName);
-	}
-
-	private setCheckboxValue(variableName: string): void {
-		getCheckboxComponent(variableName, this.inputComponents).checked = this.wizard.model.getBooleanValue(variableName);
-	}
-
-	private loadEndpointRow(row: azdata.FlexContainer, label: azdata.TextComponent, dnsInput: azdata.InputBoxComponent, portInput: azdata.InputBoxComponent): void {
-		row.clearItems();
-		const itemLayout: azdata.FlexItemLayout = { CSSStyles: { 'margin-right': '20px' } };
-		row.addItem(label);
-		if (this._model.authenticationMode === AuthenticationMode.ActiveDirectory) {
-			row.addItem(dnsInput, itemLayout);
-		}
-		row.addItem(portInput);
-	}
-}
diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/summaryPage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/summaryPage.ts
deleted file mode 100644
index 561de85be6..0000000000
--- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/summaryPage.ts
+++ /dev/null
@@ -1,434 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- * Copyright (c) Microsoft Corporation. All rights reserved.
- * Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/ -import * as azdata from 'azdata'; -import * as nls from 'vscode-nls'; -import { SectionInfo, FieldType, LabelPosition, BdcDeploymentType, FontWeight } from '../../../interfaces'; -import { createSection, createGroupContainer, createFlexContainer, createLabel } from '../../modelViewUtils'; -import * as VariableNames from '../constants'; -import { AuthenticationMode, DeployClusterWizardModel } from '../deployClusterWizardModel'; -import * as localizedConstants from '../../../localizedConstants'; -import { ResourceTypePage } from '../../resourceTypePage'; -const localize = nls.loadMessageBundle(); - -export class SummaryPage extends ResourceTypePage { - private formItems: azdata.FormComponent[] = []; - private form!: azdata.FormBuilder; - private view!: azdata.ModelView; - - constructor(private _model: DeployClusterWizardModel) { - super(localize('deployCluster.summaryPageTitle', "Summary"), '', _model.wizard); - } - - public initialize(): void { - this.pageObject.registerContent((view: azdata.ModelView) => { - this.view = view; - this.form = view.modelBuilder.formContainer(); - return view.initializeModel(this.form!.withLayout({ width: '100%' }).component()); - }); - } - - public override async onEnter(): Promise { - this._model.showCustomButtons(); - this.formItems.forEach(item => { - this.form!.removeFormItem(item); - }); - this.formItems = []; - - const deploymentTargetSectionInfo: SectionInfo = { - labelPosition: LabelPosition.Left, - labelWidth: '150px', - inputWidth: '200px', - title: localize('deployCluster.DeploymentTarget', "Deployment target"), - rows: [ - { - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.Kubeconfig', "Kube config"), - defaultValue: this.wizard.model.getStringValue(VariableNames.KubeConfigPath_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ClusterContext', "Cluster context"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ClusterContext_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - } - ] - }; - - const clusterSectionInfo: SectionInfo = { - labelPosition: LabelPosition.Left, - labelWidth: '150px', - inputWidth: '200px', - title: localize('deployCluster.ClusterSettings', "Cluster settings"), - rows: [ - { - - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.DeploymentProfile', "Deployment profile"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DeploymentProfile_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ClusterName', "Cluster name"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ClusterName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }, { - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ControllerUsername', "Controller username"), - defaultValue: this.wizard.model.getStringValue(VariableNames.AdminUserName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.AuthenticationMode', "Authentication mode"), - defaultValue: this._model.authenticationMode === AuthenticationMode.ActiveDirectory ? 
- localize('deployCluster.AuthenticationMode.ActiveDirectory', "Active Directory") : - localize('deployCluster.AuthenticationMode.Basic', "Basic"), - labelCSSStyles: { fontWeight: FontWeight.Bold } - } - ] - } - ] - }; - - if (this._model.authenticationMode === AuthenticationMode.ActiveDirectory) { - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.OuDistinguishedName', "Organizational unit"), - defaultValue: this._model.getStringValue(VariableNames.OrganizationalUnitDistinguishedName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.DomainControllerFQDNs', "Domain controller FQDNs"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DomainControllerFQDNs_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.DomainDNSIPAddresses', "Domain DNS IP addresses"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DomainDNSIPAddresses_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.DomainDNSName', "Domain DNS name"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DomainDNSName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ClusterAdmins', "Cluster admin group"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ClusterAdmins_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ClusterUsers', "Cluster users"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ClusterUsers_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.AppOwners', "App owners"), - defaultValue: this.wizard.model.getStringValue(VariableNames.AppOwners_VariableName, ''), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.AppReaders', "App readers"), - defaultValue: this.wizard.model.getStringValue(VariableNames.AppReaders_VariableName, ''), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.Subdomain', "Subdomain"), - defaultValue: this.wizard.model.getStringValue(VariableNames.Subdomain_VariableName, ''), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.AccountPrefix', "Account prefix"), - defaultValue: this.wizard.model.getStringValue(VariableNames.AccountPrefix_VariableName, ''), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - clusterSectionInfo.rows!.push({ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.DomainServiceAccountUserName', "Service account username"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DomainServiceAccountUserName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localizedConstants.realm, - defaultValue: 
this.wizard.model.getStringValue(VariableNames.Realm_VariableName, ''), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }); - } - - const azureSectionInfo: SectionInfo = { - labelPosition: LabelPosition.Left, - labelWidth: '150px', - inputWidth: '200px', - title: localize('deployCluster.AzureSettings', "Azure settings"), - rows: [{ - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.SubscriptionId', "Subscription id"), - defaultValue: this.wizard.model.getStringValue(VariableNames.SubscriptionId_VariableName) || localize('deployCluster.DefaultSubscription', "Default Azure Subscription"), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ResourceGroup', "Resource group"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ResourceGroup_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - } - ] - }, { - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.Location', "Location"), - defaultValue: this.wizard.model.getStringValue(VariableNames.Location_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.AksClusterName', "AKS cluster name"), - defaultValue: this.wizard.model.getStringValue(VariableNames.AksName_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - } - ] - }, { - items: [ - { - type: FieldType.ReadonlyText, - label: localize('deployCluster.VMSize', "VM size"), - defaultValue: this.wizard.model.getStringValue(VariableNames.VMSize_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.VMCount', "VM count"), - defaultValue: this.wizard.model.getStringValue(VariableNames.VMCount_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - } - ] - } - ] - }; - - const scaleSectionInfo: SectionInfo = { - labelPosition: LabelPosition.Left, - labelWidth: '150px', - inputWidth: '200px', - title: localize('deployCluster.ScaleSettings', "Scale settings"), - rows: [ - { - items: [{ - type: FieldType.ReadonlyText, - label: localize('deployCluster.MasterSqlServerInstances', "SQL Server master instances"), - defaultValue: this.wizard.model.getStringValue(VariableNames.SQLServerScale_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.ComputePoolInstances', "Compute pool instances"), - defaultValue: this.wizard.model.getStringValue(VariableNames.ComputePoolScale_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }, { - items: [{ - type: FieldType.ReadonlyText, - label: localize('deployCluster.DataPoolInstances', "Data pool instances"), - defaultValue: this.wizard.model.getStringValue(VariableNames.DataPoolScale_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }, { - type: FieldType.ReadonlyText, - label: localize('deployCluster.SparkPoolInstances', "Spark pool instances"), - defaultValue: this.wizard.model.getStringValue(VariableNames.SparkPoolScale_VariableName), - labelCSSStyles: { fontWeight: FontWeight.Bold } - }] - }, { - items: [{ - type: FieldType.ReadonlyText, - label: localize('deployCluster.StoragePoolInstances', "Storage pool (HDFS) instances"), - defaultValue: `${this.wizard.model.getStringValue(VariableNames.HDFSPoolScale_VariableName)} ${this.wizard.model.getBooleanValue(VariableNames.IncludeSpark_VariableName) ? 
-					localize('deployCluster.WithSpark', "(Spark included)") : ''}`,
-				labelCSSStyles: { fontWeight: FontWeight.Bold }
-			}]
-		}
-	]
-};
-
-		const createSectionFunc = async (sectionInfo: SectionInfo): Promise<FormItem> => {
-			return {
-				title: '',
-				component: await createSection({
-					container: this.wizard.wizardObject,
-					inputComponents: this._model.inputComponents,
-					sectionInfo: sectionInfo,
-					view: this.view,
-					onNewDisposableCreated: () => { },
-					onNewInputComponentCreated: () => { },
-					onNewValidatorCreated: () => { },
-					toolsService: this.wizard.toolsService
-				})
-			};
-		};
-
-		if (this._model.deploymentType === BdcDeploymentType.ExistingAKS || this._model.deploymentType === BdcDeploymentType.ExistingKubeAdm) {
-			const deploymentTargetSection = await createSectionFunc(deploymentTargetSectionInfo);
-			this.formItems.push(deploymentTargetSection);
-		}
-
-		const clusterSection = await createSectionFunc(clusterSectionInfo);
-		const scaleSection = await createSectionFunc(scaleSectionInfo);
-		const endpointSection = {
-			title: '',
-			component: this.createEndpointSection()
-		};
-		const storageSection = {
-			title: '',
-			component: this.createStorageSection()
-		};
-		if (this.wizard.model.getStringValue(VariableNames.AksName_VariableName)) {
-			const azureSection = await createSectionFunc(azureSectionInfo);
-			this.formItems.push(azureSection);
-		}
-
-		this.formItems.push(clusterSection, scaleSection, endpointSection, storageSection);
-		this.form.addFormItems(this.formItems);
-	}
-
-	public override async onLeave(): Promise<void> {
-		this._model.hideCustomButtons();
-		this.wizard.wizardObject.message = { text: '' };
-	}
-
-	private getStorageSettingValue(propertyName: string, defaultValuePropertyName: string): string | undefined {
-		const value = this.wizard.model.getStringValue(propertyName);
-		return (value === undefined || value === '') ? this.wizard.model.getStringValue(defaultValuePropertyName) : value;
-	}
-
-	private createStorageSection(): azdata.GroupContainer {
-		const serviceNameColumn: azdata.TableColumn = {
-			value: localize('deployCluster.ServiceName', "Service"),
-			width: 150
-		};
-		const dataStorageClassColumn: azdata.TableColumn = {
-			value: localize('deployCluster.DataStorageClassName', "Storage class for data"),
-			width: 180
-		};
-		const dataStorageSizeColumn: azdata.TableColumn = {
-			value: localize('deployCluster.DataClaimSize', "Claim size for data (GB)"),
-			width: 180
-		};
-		const logStorageClassColumn: azdata.TableColumn = {
-			value: localize('deployCluster.LogStorageClassName', "Storage class for logs"),
-			width: 180
-		};
-		const logStorageSizeColumn: azdata.TableColumn = {
-			value: localize('deployCluster.LogsClaimSize', "Claim size for logs (GB)"),
-			width: 180
-		};
-
-		const storageTableTitle = localize('deployCluster.StorageSettings', "Storage settings");
-		const storageTable = this.view.modelBuilder.table().withProps({
-			title: storageTableTitle,
-			ariaLabel: storageTableTitle,
-			data: [
-				[
-					localize('deployCluster.ControllerText', "Controller"),
-					this.wizard.model.getStringValue(VariableNames.ControllerDataStorageClassName_VariableName),
-					this.wizard.model.getStringValue(VariableNames.ControllerDataStorageSize_VariableName),
-					this.wizard.model.getStringValue(VariableNames.ControllerLogsStorageClassName_VariableName),
-					this.wizard.model.getStringValue(VariableNames.ControllerLogsStorageSize_VariableName)],
-				[
-					localize('deployCluster.StoragePool', "Storage pool (HDFS)"),
-					this.getStorageSettingValue(VariableNames.HDFSDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.HDFSDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName),
-					this.getStorageSettingValue(VariableNames.HDFSLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.HDFSLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)
-				], [
-					localize('deployCluster.DataText', "Data"),
-					this.getStorageSettingValue(VariableNames.DataPoolDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.DataPoolDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName),
-					this.getStorageSettingValue(VariableNames.DataPoolLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.DataPoolLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)
-				], [
-					localize('deployCluster.MasterSqlText', "SQL Server Master"),
-					this.getStorageSettingValue(VariableNames.SQLServerDataStorageClassName_VariableName, VariableNames.ControllerDataStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.SQLServerDataStorageSize_VariableName, VariableNames.ControllerDataStorageSize_VariableName),
-					this.getStorageSettingValue(VariableNames.SQLServerLogsStorageClassName_VariableName, VariableNames.ControllerLogsStorageClassName_VariableName),
-					this.getStorageSettingValue(VariableNames.SQLServerLogsStorageSize_VariableName, VariableNames.ControllerLogsStorageSize_VariableName)
-				]
-			],
-			columns: [serviceNameColumn, dataStorageClassColumn, dataStorageSizeColumn, logStorageClassColumn, logStorageSizeColumn],
-			width: '1000px',
-			height: '140px'
-		}).component();
-		return createGroupContainer(this.view, [storageTable], {
-			header: localize('deployCluster.StorageSettings', "Storage settings"),
-			collapsible: true
-		});
-	}
-
-	private createEndpointSection(): azdata.GroupContainer {
-		const endpointRows = [
-			this.createEndpointRow(localize('deployCluster.ControllerText', "Controller"), VariableNames.ControllerDNSName_VariableName, VariableNames.ControllerPort_VariableName),
-			this.createEndpointRow(localize('deployCluster.SqlServerText', "SQL Server Master"), VariableNames.SQLServerDNSName_VariableName, VariableNames.SQLServerPort_VariableName),
-			this.createEndpointRow(localize('deployCluster.GatewayText', "Gateway"), VariableNames.GatewayDNSName_VariableName, VariableNames.GateWayPort_VariableName),
-			this.createEndpointRow(localize('deployCluster.AppServiceProxyText', "Application proxy"), VariableNames.AppServiceProxyDNSName_VariableName, VariableNames.AppServiceProxyPort_VariableName),
-			this.createEndpointRow(localize('deployCluster.ServiceProxyText', "Management proxy"), VariableNames.ServiceProxyDNSName_VariableName, VariableNames.ServiceProxyPort_VariableName)
-		];
-
-		if (this.wizard.model.getIntegerValue(VariableNames.SQLServerScale_VariableName) > 1) {
-			endpointRows.push(
-				this.createEndpointRow(localize('deployCluster.ReadableSecondaryText', "Readable secondary"), VariableNames.ReadableSecondaryDNSName_VariableName, VariableNames.ReadableSecondaryPort_VariableName)
-			);
-		}
-		return createGroupContainer(this.view, endpointRows, {
-			header: localize('deployCluster.EndpointSettings', "Endpoint settings"),
-			collapsible: true
-		});
-	}
-
-	private createEndpointRow(name: string, dnsVariableName: string, portVariableName: string): azdata.FlexContainer {
-		const items = [];
-		items.push(createLabel(this.view, { text: name, width: '150px', cssStyles: { fontWeight: FontWeight.Bold } }));
-		if (this._model.authenticationMode === AuthenticationMode.ActiveDirectory) {
-			items.push(createLabel(this.view, {
-				text: this.wizard.model.getStringValue(dnsVariableName)!, width: '200px'
-			}));
-		}
-		items.push(createLabel(this.view, {
-			text: this.wizard.model.getStringValue(portVariableName)!, width: '100px'
-		}));
-		return createFlexContainer(this.view, items);
-	}
-}
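For reviewers skimming the removed summary page: each non-controller service's storage cells fell back to the controller-level settings when left unset. A minimal standalone sketch of that lookup, with a plain string map standing in for the wizard's model object (the map and keys here are hypothetical, not the extension's actual API):

```typescript
// Sketch of the fallback implemented by getStorageSettingValue above.
function getStorageSettingValue(
	model: Map<string, string>,
	propertyName: string,
	defaultValuePropertyName: string
): string | undefined {
	const value = model.get(propertyName);
	// An empty string counts as "unset", so the controller-level default wins.
	return (value === undefined || value === '') ? model.get(defaultValuePropertyName) : value;
}

// Example: the HDFS storage class falls back to the controller's storage class.
const model = new Map<string, string>([['controllerDataStorageClassName', 'managed-premium']]);
console.log(getStorageSettingValue(model, 'hdfsDataStorageClassName', 'controllerDataStorageClassName')); // 'managed-premium'
```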
diff --git a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/targetClusterPage.ts b/extensions/resource-deployment/src/ui/deployClusterWizard/pages/targetClusterPage.ts
deleted file mode 100644
index 730a8cf4ef..0000000000
--- a/extensions/resource-deployment/src/ui/deployClusterWizard/pages/targetClusterPage.ts
+++ /dev/null
@@ -1,176 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the Source EULA. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-import * as azdata from 'azdata';
-import * as os from 'os';
-import * as vscode from 'vscode';
-import * as nls from 'vscode-nls';
-import { KubeClusterContext } from '../../../services/kubeService';
-import { ResourceTypePage } from '../../resourceTypePage';
-import { ClusterContext_VariableName, KubeConfigPath_VariableName } from '../constants';
-import { DeployClusterWizardModel } from '../deployClusterWizardModel';
-const localize = nls.loadMessageBundle();
-
-const ClusterRadioButtonGroupName = 'ClusterRadioGroup';
-
-export class TargetClusterContextPage extends ResourceTypePage {
-	private existingClusterControl: azdata.FlexContainer | undefined;
-	private clusterContextsLabel: azdata.TextComponent | undefined;
-	private errorLoadingClustersLabel: azdata.TextComponent | undefined;
-	private clusterContextList: azdata.DivContainer | undefined;
-	private clusterContextLoadingComponent: azdata.LoadingComponent | undefined;
-	private configFileInput: azdata.InputBoxComponent | undefined;
-	private browseFileButton: azdata.ButtonComponent | undefined;
-	private loadDefaultKubeConfigFile: boolean = true;
-	private view: azdata.ModelView | undefined;
-
-	constructor(private _model: DeployClusterWizardModel) {
-		super(localize('deployCluster.TargetClusterContextPageTitle', "Target cluster context"),
-			localize('deployCluster.TargetClusterContextPageDescription', "Select the kube config file and then select a cluster context from the list"), _model.wizard);
-	}
-
-	public initialize(): void {
-		this.pageObject.registerContent((view: azdata.ModelView) => {
-			this.view = view;
-			this.initExistingClusterControl();
-			let formBuilder = view.modelBuilder.formContainer().withFormItems(
-				[
-					{
-						component: this.existingClusterControl!,
-						title: ''
-					}
-				],
-				{
-					horizontal: false
-				}
-			).withLayout({ width: '100%', height: '100%' });
-			const form = formBuilder.withLayout({ width: '100%' }).component();
-			return view.initializeModel(form);
-		});
-	}
-
-	public override async onEnter(): Promise<void> {
-		if (this.loadDefaultKubeConfigFile) {
-			let defaultKubeConfigPath = this._model.kubeService.getDefaultConfigPath();
-			this.loadClusterContexts(defaultKubeConfigPath);
-			this.loadDefaultKubeConfigFile = false;
-		}
-
-		this.wizard.wizardObject.registerNavigationValidator((e) => {
-			if (e.lastPage > e.newPage) {
-				this.wizard.wizardObject.message = { text: '' };
-				return true;
-			}
-			let clusterSelected = this.wizard.model.getStringValue(ClusterContext_VariableName) !== undefined;
-			if (!clusterSelected) {
-				this.wizard.wizardObject.message = {
-					text: localize('deployCluster.ClusterContextNotSelectedMessage', "Please select a cluster context."),
-					level: azdata.window.MessageLevel.Error
-				};
-			}
-			return clusterSelected;
-		});
-	}
-
-	public override async onLeave(): Promise<void> {
-		this.wizard.wizardObject.registerNavigationValidator((e) => {
-			return true;
-		});
-	}
-
-	private initExistingClusterControl(): void {
-		let self = this;
-		const labelWidth = '150px';
-		let configFileLabel = this.view!.modelBuilder.text().withProps({ value: localize('deployCluster.kubeConfigFileLabelText', "Kube config file path") }).component();
-		configFileLabel.width = labelWidth;
-		this.configFileInput = this.view!.modelBuilder.inputBox().withProps({ width: '300px' }).component();
-		this.configFileInput.enabled = false;
-		this.browseFileButton = this.view!.modelBuilder.button().withProps({ label: localize('deployCluster.browseText', "Browse"), width: '100px', secondary: true }).component();
-		let configFileContainer = this.view!.modelBuilder.flexContainer()
-			.withLayout({ flexFlow: 'row', alignItems: 'baseline' })
-			.withItems([configFileLabel, this.configFileInput, this.browseFileButton], { CSSStyles: { 'margin-right': '10px' } }).component();
-		this.clusterContextsLabel = this.view!.modelBuilder.text().withProps({ value: localize('deployCluster.clusterContextsLabelText', "Cluster Contexts") }).component();
-		this.clusterContextsLabel.width = labelWidth;
-		this.errorLoadingClustersLabel = this.view!.modelBuilder.text().withProps({ value: localize('deployCluster.errorLoadingClustersText', "No cluster information is found in the config file or an error occurred while loading the config file") }).component();
-		this.clusterContextList = this.view!.modelBuilder.divContainer().component();
-		this.clusterContextLoadingComponent = this.view!.modelBuilder.loadingComponent().withItem(this.clusterContextList).component();
-		this.existingClusterControl = this.view!.modelBuilder.divContainer().withProps({ clickable: false }).component();
-		let clusterContextContainer = this.view!.modelBuilder.flexContainer().withLayout({ flexFlow: 'row', alignItems: 'start' }).component();
-		clusterContextContainer.addItem(this.clusterContextsLabel, { flex: '0 0 auto' });
-		clusterContextContainer.addItem(this.clusterContextLoadingComponent, { flex: '0 0 auto', CSSStyles: { 'width': '400px', 'margin-left': '10px', 'margin-top': '10px' } });
-
-		this.existingClusterControl.addItem(configFileContainer, { CSSStyles: { 'margin-top': '0px' } });
-		this.existingClusterControl.addItem(clusterContextContainer, {
-			CSSStyles: { 'margin-top': '10px' }
-		});
-
-		this.wizard.registerDisposable(this.browseFileButton.onDidClick(async () => {
-			let fileUris = await vscode.window.showOpenDialog(
-				{
-					canSelectFiles: true,
-					canSelectFolders: false,
-					canSelectMany: false,
-					defaultUri: vscode.Uri.file(os.homedir()),
-					openLabel: localize('deployCluster.selectKubeConfigFileText', "Select"),
-					filters: {
-						'Config Files': ['*'],
-					}
-				}
-			);
-
-			if (!fileUris || fileUris.length === 0) {
-				return;
-			}
-			self.clusterContextList!.clearItems();
-
-			let fileUri = fileUris[0];
-
-			self.loadClusterContexts(fileUri.fsPath);
-		}));
-	}
-
-	private async loadClusterContexts(configPath: string): Promise<void> {
-		this.clusterContextLoadingComponent!.loading = true;
-		this.wizard.model.setPropertyValue(ClusterContext_VariableName, undefined);
-		this.wizard.wizardObject.message = { text: '' };
-		let self = this;
-		this.configFileInput!.value = configPath;
-
-		let clusterContexts: KubeClusterContext[] = [];
-		try {
-			clusterContexts = await this._model.kubeService.getClusterContexts(configPath);
-		} catch (error) {
-			this.wizard.wizardObject.message = {
-				text: localize('deployCluster.ConfigParseError', "Failed to load the config file"),
-				description: error.message || error, level: azdata.window.MessageLevel.Error
-			};
-		}
-		if (clusterContexts.length !== 0) {
-			self.wizard.model.setPropertyValue(KubeConfigPath_VariableName, configPath);
-			let options = clusterContexts.map(clusterContext => {
-				let option = this.view!.modelBuilder.radioButton().withProps({
-					label: clusterContext.name,
-					checked: clusterContext.isCurrentContext,
-					name: ClusterRadioButtonGroupName
-				}).component();
-
-				if (clusterContext.isCurrentContext) {
-					self.wizard.model.setPropertyValue(ClusterContext_VariableName, clusterContext.name);
-					self.wizard.wizardObject.message = { text: '' };
-				}
-
-				this.wizard.registerDisposable(option.onDidClick(() => {
-					self.wizard.model.setPropertyValue(ClusterContext_VariableName, clusterContext.name);
-					self.wizard.wizardObject.message = { text: '' };
-				}));
-				return option;
-			});
-			self.clusterContextList!.addItems(options);
-		} else {
-			self.clusterContextList!.addItem(this.errorLoadingClustersLabel!);
-		}
-		this.clusterContextLoadingComponent!.loading = false;
-	}
-}
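The page deleted above boils down to one flow: resolve a kube config path (defaulting to the user's kubeconfig), enumerate its contexts, surface parse errors, and pre-select the current context. A condensed sketch of that flow against a hypothetical `KubeService` interface (names assumed, not the extension's actual `IKubeService`):

```typescript
interface KubeClusterContext { name: string; isCurrentContext: boolean; }

interface KubeService {
	getDefaultConfigPath(): string;
	getClusterContexts(configPath: string): Promise<KubeClusterContext[]>;
}

// Mirrors loadClusterContexts above: load contexts, report a parse failure,
// and default the selection to the current context when one exists.
async function pickClusterContext(kube: KubeService, configPath?: string): Promise<string | undefined> {
	const path = configPath ?? kube.getDefaultConfigPath();
	let contexts: KubeClusterContext[] = [];
	try {
		contexts = await kube.getClusterContexts(path);
	} catch (error) {
		throw new Error(`Failed to load the config file: ${error instanceof Error ? error.message : String(error)}`);
	}
	return contexts.find(c => c.isCurrentContext)?.name ?? contexts[0]?.name;
}
```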
diff --git a/extensions/resource-deployment/src/ui/notebookWizard/notebookWizardModel.ts b/extensions/resource-deployment/src/ui/notebookWizard/notebookWizardModel.ts
index 5de1053ffd..51ed58ebf6 100644
--- a/extensions/resource-deployment/src/ui/notebookWizard/notebookWizardModel.ts
+++ b/extensions/resource-deployment/src/ui/notebookWizard/notebookWizardModel.ts
@@ -8,7 +8,7 @@ import { IToolsService } from '../../services/toolsService';
 import { InputComponents, setModelValues } from '../modelViewUtils';
 import { ResourceTypeModel } from '../resourceTypeModel';
 import { ResourceTypeWizard } from '../resourceTypeWizard';
-import { DeploymentType, NotebookWizardDeploymentProvider, NotebookWizardInfo } from '../../interfaces';
+import { NotebookWizardDeploymentProvider, NotebookWizardInfo } from '../../interfaces';
 import { IPlatformService } from '../../services/platformService';
 import { NotebookWizardAutoSummaryPage } from './notebookWizardAutoSummaryPage';
 import { NotebookWizardPage } from './notebookWizardPage';
@@ -47,10 +47,6 @@ export class NotebookWizardModel extends ResourceTypeModel {
 		this.wizard.wizardObject.generateScriptButton.label = this.notebookProvider.notebookWizard.scriptAction?.label || loc.scriptToNotebook;
 	}
 
-	public get deploymentType(): DeploymentType | undefined {
-		return this.notebookProvider.notebookWizard.type;
-	}
-
 	public initialize(): void {
 		this.wizard.setPages(this.getPages());
 	}
diff --git a/extensions/resource-deployment/src/ui/resourceTypeWizard.ts b/extensions/resource-deployment/src/ui/resourceTypeWizard.ts
index 5290a3a1c3..846d6a7d93 100644
--- a/extensions/resource-deployment/src/ui/resourceTypeWizard.ts
+++ b/extensions/resource-deployment/src/ui/resourceTypeWizard.ts
@@ -5,12 +5,10 @@
 import * as azdata from 'azdata';
 import * as vscode from 'vscode';
-import { DeploymentProvider, InitialVariableValues, instanceOfAzureSQLDBDeploymentProvider, instanceOfAzureSQLVMDeploymentProvider, instanceOfNotebookWizardDeploymentProvider, instanceOfWizardDeploymentProvider, ResourceType, ResourceTypeOptionValue } from '../interfaces';
-import { DeployClusterWizardModel } from './deployClusterWizard/deployClusterWizardModel';
+import { DeploymentProvider, InitialVariableValues, instanceOfAzureSQLDBDeploymentProvider, instanceOfAzureSQLVMDeploymentProvider, instanceOfNotebookWizardDeploymentProvider, ResourceType, ResourceTypeOptionValue } from '../interfaces';
 import { DeployAzureSQLVMWizardModel } from './deployAzureSQLVMWizard/deployAzureSQLVMWizardModel';
 import { WizardPageInfo } from './wizardPageInfo';
 import { IKubeService } from '../services/kubeService';
-import { IAzdataService } from '../services/azdataService';
 import { INotebookService } from '../services/notebookService';
 import { IToolsService } from '../services/toolsService';
 import { IPlatformService } from '../services/platformService';
@@ -55,7 +53,6 @@ export class ResourceTypeWizard {
 	constructor(
 		public resourceType: ResourceType,
 		public _kubeService: IKubeService,
-		public azdataService: IAzdataService,
 		public notebookService: INotebookService,
 		public toolsService: IToolsService,
 		public platformService: IPlatformService,
@@ -124,9 +121,7 @@ export class ResourceTypeWizard {
 	private getResourceProviderModel(): ResourceTypeModel {
-		if (instanceOfWizardDeploymentProvider(this.provider)) {
-			return new DeployClusterWizardModel(this.provider, this);
-		} else if (instanceOfAzureSQLVMDeploymentProvider(this.provider)) {
+		if (instanceOfAzureSQLVMDeploymentProvider(this.provider)) {
 			return new DeployAzureSQLVMWizardModel(this.provider, this);
 		} else if (instanceOfNotebookWizardDeploymentProvider(this.provider)) {
 			return new NotebookWizardModel(this.provider, this);
diff --git a/package.json b/package.json
index 7b5526ebe0..f0f1c9b4d6 100755
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "azuredatastudio",
   "version": "1.41.0",
-  "distro": "c6da977529b6719ff1974faa53a73abe01ec13ac",
+  "distro": "a58380f8678270cb60e90ca5edd8c407908f8108",
   "author": {
     "name": "Microsoft Corporation"
   },
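For context on the `getResourceProviderModel` hunk above: the wizard dispatches on plain-object type guards (the `instanceOf*` helpers) rather than classes, and removing the BDC branch simply drops one guard from the chain. A reduced sketch of that pattern with simplified provider shapes (illustrative, not the extension's real interfaces):

```typescript
interface NotebookWizardDeploymentProvider { notebookWizard: object; }
interface AzureSQLVMDeploymentProvider { azureSQLVMWizard: object; }
type DeploymentProvider = NotebookWizardDeploymentProvider | AzureSQLVMDeploymentProvider;

// User-defined type guard keyed on a discriminating property, in the style
// of the extension's instanceOf* helpers.
function instanceOfNotebookWizardDeploymentProvider(p: DeploymentProvider): p is NotebookWizardDeploymentProvider {
	return 'notebookWizard' in p;
}

// Guards are checked in order; the first match decides which model is built.
function getResourceProviderModelName(provider: DeploymentProvider): string {
	if (instanceOfNotebookWizardDeploymentProvider(provider)) {
		return 'NotebookWizardModel';
	}
	return 'DeployAzureSQLVMWizardModel';
}
```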
diff --git a/resources/localization/LCL/de/big-data-cluster.xlf.lcl b/resources/localization/LCL/de/big-data-cluster.xlf.lcl
deleted file mode 100644
index 9af3534cae..0000000000
--- a/resources/localization/LCL/de/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted; the XML content did not survive extraction]
diff --git a/resources/localization/LCL/es/big-data-cluster.xlf.lcl b/resources/localization/LCL/es/big-data-cluster.xlf.lcl
deleted file mode 100644
index 72fe35a094..0000000000
--- a/resources/localization/LCL/es/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/fr/big-data-cluster.xlf.lcl b/resources/localization/LCL/fr/big-data-cluster.xlf.lcl
deleted file mode 100644
index be63af12e8..0000000000
--- a/resources/localization/LCL/fr/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/it/big-data-cluster.xlf.lcl b/resources/localization/LCL/it/big-data-cluster.xlf.lcl
deleted file mode 100644
index 3470e7a27b..0000000000
--- a/resources/localization/LCL/it/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/ja/big-data-cluster.xlf.lcl b/resources/localization/LCL/ja/big-data-cluster.xlf.lcl
deleted file mode 100644
index f3a6afc856..0000000000
--- a/resources/localization/LCL/ja/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/ko/big-data-cluster.xlf.lcl b/resources/localization/LCL/ko/big-data-cluster.xlf.lcl
deleted file mode 100644
index a8e67fca2e..0000000000
--- a/resources/localization/LCL/ko/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/pt-BR/big-data-cluster.xlf.lcl b/resources/localization/LCL/pt-BR/big-data-cluster.xlf.lcl
deleted file mode 100644
index 61f12ea725..0000000000
--- a/resources/localization/LCL/pt-BR/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/ru/big-data-cluster.xlf.lcl b/resources/localization/LCL/ru/big-data-cluster.xlf.lcl
deleted file mode 100644
index d228846291..0000000000
--- a/resources/localization/LCL/ru/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/zh-Hans/big-data-cluster.xlf.lcl b/resources/localization/LCL/zh-Hans/big-data-cluster.xlf.lcl
deleted file mode 100644
index a909cc1d82..0000000000
--- a/resources/localization/LCL/zh-Hans/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/localization/LCL/zh-Hant/big-data-cluster.xlf.lcl b/resources/localization/LCL/zh-Hant/big-data-cluster.xlf.lcl
deleted file mode 100644
index 373ad03a59..0000000000
--- a/resources/localization/LCL/zh-Hant/big-data-cluster.xlf.lcl
+++ /dev/null
@@ -1,1655 +0,0 @@
[1655 deleted lines of LCL localization markup omitted]
diff --git a/resources/xlf/LocProject.json b/resources/xlf/LocProject.json
index 86d47d331b..a4d255998c 100644
--- a/resources/xlf/LocProject.json
+++ b/resources/xlf/LocProject.json
@@ -44,12 +44,6 @@
             "LclFile": "resources\\localization\\LCL\\{Lang}\\azurehybridtoolkit.xlf.lcl",
             "CopyOption": "LangIDOnPath",
             "OutputPath": "resources\\xlf"
-        },
-        {
-            "SourceFile": "resources\\xlf\\en\\big-data-cluster.xlf",
-            "LclFile": "resources\\localization\\LCL\\{Lang}\\big-data-cluster.xlf.lcl",
-            "CopyOption": "LangIDOnPath",
-            "OutputPath": "resources\\xlf"
         },
         {
             "SourceFile": "resources\\xlf\\en\\cms.xlf",
diff --git a/resources/xlf/de/big-data-cluster.de.xlf b/resources/xlf/de/big-data-cluster.de.xlf
deleted file mode 100644
index c94f861321..0000000000
--- a/resources/xlf/de/big-data-cluster.de.xlf
+++ /dev/null
@@ -1,753 +0,0 @@
[753 deleted lines omitted: XLIFF trans-units pairing the extension's English UI strings (e.g. "Error deleting mount", "Big Data Cluster overview", "Mount HDFS Folder") with their German translations; the XML markup did not survive extraction]
diff --git a/resources/xlf/en/big-data-cluster.xlf b/resources/xlf/en/big-data-cluster.xlf
deleted file mode 100644
index b455d0d64b..0000000000
--- a/resources/xlf/en/big-data-cluster.xlf
+++ /dev/null
@@ -1,559 +0,0 @@
[559 deleted lines omitted: the English source XLIFF for the same UI strings; the XML markup did not survive extraction]
Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - - - Mounting HDFS folder is complete - - - Error mounting folder: {0} - - - Mounting is likely to complete, check back later to verify - - - Mounting HDFS folder on path {0} - - - Mount creation has started - - - OK - - - Password - - - Refresh Mount - - - Refreshing HDFS Mount on path {0} - - - Refresh mount request submitted - - - Remember Password - - - Authentication type - - - URL - - - Username - - - - - Unexpected error loading saved controllers: {0} - - - - - Healthy - - - Unhealthy - - - Application Proxy - - - Cluster Management Service - - - Gateway to access HDFS files, Spark - - - Metrics Dashboard - - - Log Search Dashboard - - - Proxy for running Spark statements, jobs, applications - - - Management Proxy - - - Management Proxy - - - Spark Jobs Management and Monitoring Dashboard - - - SQL Server Master Instance Front-End - - - HDFS File System Proxy - - - Spark Diagnostics and Monitoring Dashboard - - - Unexpected error retrieving BDC Endpoints: {0} - - - App - - - Control - - - Gateway - - - HDFS - - - Spark - - - SQL Server - - - Applying Upgrade - - - Applying Managed Upgrade - - - Creating - - - Creating Groups - - - Creating Kerberos Delegation Setup - - - Creating Resources - - - Creating Secrets - - - Deleted - - - Deleting - - - Error - - - Managed Upgrading - - - Ready - - - Rollback - - - Rollback Complete - - - Rollback In Progress - - - Running - - - Upgrade Paused - - - Upgrading - - - Waiting - - - Waiting For Deletion - - - Waiting For Groups - - - Waiting For Kerberos Delegation Setup - - - Waiting For Resources - - - Waiting For Secrets - - - Waiting For Upgrade - - - - - Big Data Cluster Dashboard - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - - - Controller endpoint information was not found - - - Are you sure you want to remove '{0}'? - - - No - - - Yes - - - - - SQL Server 2019 - - - I accept {0}, {1} and {2}. - - - azdata License Terms - - - SQL Server License Terms - - - AKS cluster name - - - Region - - - Resource group name - - - Azure settings - - - Subscription id - - - Use my default Azure subscription - - - VM count - - - VM size - - - Cluster name - - - SQL Server Big Data Cluster settings - - - Confirm password - - - Controller username - - - Capacity for data (GB) - - - Deployment target - - - Existing Azure Kubernetes Service Cluster - - - Existing Azure Red Hat OpenShift cluster - - - Existing Kubernetes Cluster (kubeadm) - - - Existing OpenShift cluster - - - New Azure Kubernetes Service Cluster - - - Capacity for logs (GB) - - - Password - - - Storage class name - - - Big Data Cluster - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - - - Loading controllers... 
- - - Connect to Existing Controller - - - Create New Controller - - - Delete Mount - - - Manage - - - Mount HDFS - - - Refresh - - - Refresh Mount - - - Remove Controller - - - Support for managing SQL Server Big Data Clusters - - - Microsoft Privacy Statement - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - - - SQL Server Big Data Cluster - - - SQL Server Big Data Clusters - - - Version - - - \ No newline at end of file diff --git a/resources/xlf/es/big-data-cluster.es.xlf b/resources/xlf/es/big-data-cluster.es.xlf deleted file mode 100644 index c17dae3e98..0000000000 --- a/resources/xlf/es/big-data-cluster.es.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - Error al eliminar el montaje - - - Error retrieving BDC status from {0} - Error al recuperar el estado de BDC desde {0} - - - Error retrieving cluster config from {0} - Error al recuperar la configuración del clúster de {0}. - - - Error retrieving endpoints from {0} - Error al recuperar los puntos de conexión desde {0} - - - Error creating mount - Error al crear el montaje - - - Error refreshing mount - Error al actualizar el montaje - - - Error getting mount status - Error al obtener el estado del montaje. - - - Error during authentication - Error durante la autenticación - - - You do not have permission to log into this cluster using Windows Authentication - No tiene permiso para iniciar sesión en este clúster mediante la autenticación de Windows - - - This cluster does not support Windows authentication - Este clúster no admite la autenticación de Windows - - - - - - - Add - Agregar - - - Add New Controller - Agregar nuevo controlador - - - Basic - Básico - - - Big Data Cluster overview - Información general del clúster de macrodatos - - - Cluster Details - Detalles del clúster - - - Cluster Overview - Información general del clúster - - - Cluster Properties - Propiedades del clúster - - - Cluster State - Estado del clúster - - - Copy - Copiar - - - Endpoint - Punto de conexión - - - Health Status - Estado de mantenimiento - - - Health Status Details - Detalles del estado de mantenimiento - - - Instance - Instancia - - - Last Updated : {0} - Ultima actualización: {0} - - - Loading cluster state completed - La carga del estado del clúster se ha completado. - - - Loading health status completed - La carga del estado de mantenimiento se ha completado. - - - Logs - Registros - - - Metrics and Logs - Métricas y registros - - - The dashboard requires a connection. Please click retry to enter your credentials. - El panel requiere una conexión. Haga clic en Reintentar para escribir sus credenciales. 
- - - Node Metrics - Métricas de nodo - - - N/A - N/D - - - Refresh - Actualizar - - - Service - Servicio - - - Service Endpoints - Puntos de conexión del servicio - - - Service Name - Nombre del servicio - - - SQL Metrics - Métricas de SQL - - - State - Estado - - - Status Icon - Icono de estado - - - Troubleshoot - Solucionar problemas - - - Unexpected error occurred: {0} - Error inesperado: {0} - - - View - Ver - - - View Details - Ver detalles - - - View Error Details - Ver detalles del error - - - View Kibana Logs {0} - Ver registros de Kibana {0} - - - View Node Metrics {0} - Ver métricas del nodo {0} - - - View SQL Metrics {0} - Ver métricas SQL {0} - - - Cancel - Cancelar - - - Cluster Management URL - Dirección URL de administración del clúster - - - Connect to Controller - Conexión con el controlador - - - Endpoint '{0}' copied to clipboard - Punto de conexión "{0}" copiado en el Portapapeles - - - Delete Mount - Eliminación de un montaje - - - Deleting HDFS Mount on path {0} - Eliminando el montaje de HDFS en la ruta de acceso {0} - - - Delete mount request submitted - Solicitud de eliminación de montaje enviada - - - Unexpected error retrieving BDC Endpoints: {0} - Error inesperado al recuperar puntos de conexión de BDC: {0} - - - Password is required - Se necesita la contraseña. - - - Username is required - Es necesario especificar el nombre de usuario. - - - Cluster Connection - Conexión del clúster - - - Windows Authentication - Autenticación de Windows - - - Mount credentials for authentication to remote data source for reads - Credenciales de montaje para la autenticación en el origen de datos remoto para lecturas - - - Credentials - Credenciales - - - Mount HDFS Folder - Montaje de carpetas HDFS - - - Bad formatting of credentials at {0} - Formato incorrecto de las credenciales en {0} - - - Unknown error occurred during the mount process - Error desconocido durante el proceso de montaje - - - Login to controller failed - Error al iniciar sesión en el controlador - - - Login to controller failed: {0} - Error al iniciar sesión en el controlador: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - Ruta de acceso a un nuevo directorio (no existente) que desea asociar al montaje - - - HDFS Path - Ruta HDFS - - - Mount Configuration - Configuración del montaje - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - El URI del origen de datos remoto. 
Ejemplo de ADLS: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - URI remoto - - - Mounting HDFS folder is complete - El montaje de la carpeta HDFS se ha completado - - - Error mounting folder: {0} - Error al montar la carpeta: {0} - - - Mounting is likely to complete, check back later to verify - Es probable que el montaje se complete, compruébelo de nuevo más tarde - - - Mounting HDFS folder on path {0} - Montaje de la carpeta HDFS en la ruta de acceso {0} - - - Mount creation has started - La creación del montaje ha comenzado - - - OK - Aceptar - - - Password - Contraseña - - - Refresh Mount - Actualización del montaje - - - Refreshing HDFS Mount on path {0} - Actualizando el montaje de HDFS en la ruta de acceso {0} - - - Refresh mount request submitted - Solicitud de actualización de montaje enviada - - - Remember Password - Recordar contraseña - - - Authentication type - Tipo de autenticación - - - URL - Dirección URL - - - Username - Nombre de usuario - - - - - - - Unexpected error loading saved controllers: {0} - Error inesperado al cargar los controladores guardados: {0}. - - - - - - - Healthy - En buen estado - - - Unhealthy - En mal estado - - - Application Proxy - Proxy de aplicación - - - Cluster Management Service - Servicio de administración de clústeres - - - Gateway to access HDFS files, Spark - Puerta de enlace para acceder a archivos HDFS, Spark - - - Metrics Dashboard - Panel de métricas - - - Log Search Dashboard - Panel de búsqueda de registros - - - Proxy for running Spark statements, jobs, applications - Proxy para ejecutar instrucciones, trabajos, aplicaciones de Spark - - - Management Proxy - Proxy de administración - - - Management Proxy - Proxy de administración - - - Spark Jobs Management and Monitoring Dashboard - Panel de supervisión y administración de trabajos de Spark - - - SQL Server Master Instance Front-End - Front-end de instancia maestra de SQL Server - - - HDFS File System Proxy - Proxy de sistema de archivos HDFS - - - Spark Diagnostics and Monitoring Dashboard - Panel de diagnóstico y supervisión de Spark - - - Unexpected error retrieving BDC Endpoints: {0} - Error inesperado al recuperar puntos de conexión de BDC: {0} - - - App - Aplicación - - - Control - Control - - - Gateway - Puerta de enlace - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - Aplicando actualización - - - Applying Managed Upgrade - Aplicando la actualización administrada - - - Creating - Creando - - - Creating Groups - Creando grupos - - - Creating Kerberos Delegation Setup - Creación de la configuración de la delegación Kerberos - - - Creating Resources - Creando recursos - - - Creating Secrets - Creando secretos - - - Deleted - Eliminado - - - Deleting - Eliminando - - - Error - Error - - - Managed Upgrading - Actualización administrada - - - Ready - Listo - - - Rollback - Reversión - - - Rollback Complete - Reversión finalizada - - - Rollback In Progress - Reversión en curso - - - Running - En ejecución - - - Upgrade Paused - Actualización en pausa - - - Upgrading - Actualizando - - - Waiting - Esperando - - - Waiting For Deletion - Esperando la eliminación - - - Waiting For Groups - Esperando grupos - - - Waiting For Kerberos Delegation Setup - Esperando la configuración de la delegación Kerberos - - - Waiting For Resources - Esperando recursos - - - Waiting For Secrets - Esperando secretos - - - Waiting For Upgrade - Esperando la actualización - - - - - - - Big Data Cluster Dashboard - - Panel del clúster de macrodatos: 
- - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - El complemento clúster de macrodatos se va a retirar y su funcionalidad de Azure Data Studio se quitará en una próxima versión. Obtenga más información sobre esto y soporte técnico en el futuro [aquí](https://go.microsoft.com/fwlink/?linkid=2207340). - - - Controller endpoint information was not found - No se encontró información del punto de conexión del controlador - - - Are you sure you want to remove '{0}'? - ¿Seguro que quiere quitar "{0}"? - - - No - No - - - Yes - - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - Acepto {0}, {1} y {2}. - - - azdata License Terms - Términos de licencia de azdata - - - SQL Server License Terms - Términos de licencia de SQL Server - - - AKS cluster name - Nombre del clúster de AKS - - - Region - Región - - - Resource group name - Nombre del grupo de recursos - - - Azure settings - Configuración de Azure - - - Subscription id - Identificador de suscripción - - - Use my default Azure subscription - Usar mi suscripción predeterminada de Azure - - - VM count - Recuento de máquinas virtuales - - - VM size - Tamaño de la máquina virtual - - - Cluster name - Nombre del clúster - - - SQL Server Big Data Cluster settings - Configuración del clúster de macrodatos de SQL Server - - - Confirm password - Confirmar contraseña - - - Controller username - Nombre de usuario del controlador - - - Capacity for data (GB) - Capacidad para datos (GB) - - - Deployment target - Destino de implementación - - - Existing Azure Kubernetes Service Cluster - Clúster de Azure Kubernetes Service existente - - - Existing Azure Red Hat OpenShift cluster - Clúster de Red Hat OpenShift en Azure existente - - - Existing Kubernetes Cluster (kubeadm) - Clúster Kubernetes existente (kubeadm) - - - Existing OpenShift cluster - Clúster de OpenShift existente - - - New Azure Kubernetes Service Cluster - Nuevo clúster de Azure Kubernetes Service - - - Capacity for logs (GB) - Capacidad para registros (GB) - - - Password - Contraseña - - - Storage class name - Nombre de la clase de almacenamiento - - - Big Data Cluster - Clúster de macrodatos - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - Ignorar los errores de verificación SSL en los puntos de conexión del clúster de macrodatos de SQL Server, como HDFS, Spark y Controller, si es true - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - No se ha registrado ningún controlador del clúster de macrodatos de SQL. [Más información](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Conexión de un controlador](command:bigDataClusters.command.connectController) - - - Loading controllers... - Se están cargando los controladores... 
- - - Connect to Existing Controller - Conexión con el controlador existente - - - Create New Controller - Creación de un nuevo controlador - - - Delete Mount - Eliminar montaje - - - Manage - Administrar - - - Mount HDFS - Montar HDFS - - - Refresh - Actualizar - - - Refresh Mount - Actualizar montaje - - - Remove Controller - Eliminación de un controlador - - - Support for managing SQL Server Big Data Clusters - Compatibilidad con la administración de clústeres de macrodatos de SQL Server - - - Microsoft Privacy Statement - Declaración de privacidad de Microsoft - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - El clúster de macrodatos de SQL Server le permite implementar clústeres escalables de contenedores de SQL Server, Spark y HDFS que se ejecutan en Kubernetes - - - SQL Server Big Data Cluster - Clúster de macrodatos de SQL Server - - - SQL Server Big Data Clusters - Clústeres de macrodatos de SQL Server - - - Version - Versión - - - - \ No newline at end of file diff --git a/resources/xlf/fr/big-data-cluster.fr.xlf b/resources/xlf/fr/big-data-cluster.fr.xlf deleted file mode 100644 index a611288b49..0000000000 --- a/resources/xlf/fr/big-data-cluster.fr.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - Erreur de suppression du montage - - - Error retrieving BDC status from {0} - Erreur de récupération de l'état BDC de {0} - - - Error retrieving cluster config from {0} - Erreur de récupération de la configuration de cluster à partir de {0} - - - Error retrieving endpoints from {0} - Erreur de récupération des points de terminaison de {0} - - - Error creating mount - Erreur de création du montage - - - Error refreshing mount - Erreur d'actualisation du montage - - - Error getting mount status - Erreur d'obtention de l'état de montage - - - Error during authentication - Erreur pendant l'authentification - - - You do not have permission to log into this cluster using Windows Authentication - Vous n'avez pas l'autorisation de vous connecter à ce cluster à l'aide de l'authentification Windows - - - This cluster does not support Windows authentication - Ce cluster ne prend pas en charge l'authentification Windows - - - - - - - Add - Ajouter - - - Add New Controller - Ajouter un nouveau contrôleur - - - Basic - De base - - - Big Data Cluster overview - Vue d'ensemble du cluster Big Data - - - Cluster Details - Détails du cluster - - - Cluster Overview - Vue d'ensemble du cluster - - - Cluster Properties - Propriétés du cluster - - - Cluster State - État du cluster - - - Copy - Copier - - - Endpoint - Point de terminaison - - - Health Status - État d'intégrité - - - Health Status Details - Détails de l'état d'intégrité - - - Instance - Instance - - - Last Updated : {0} - Dernière mise à jour : {0} - - - Loading cluster state completed - L'état de cluster a été chargé - - - Loading health status completed - L'état d'intégrité a été chargé - - - Logs - Journaux - - - Metrics and Logs - Métriques et journaux - - - The dashboard requires a connection. Please click retry to enter your credentials. - Le tableau de bord nécessite une connexion. Cliquez sur Réessayer pour entrer vos informations d'identification. 
- - - Node Metrics - Métriques de nœud - - - N/A - N/A - - - Refresh - Actualiser - - - Service - Service - - - Service Endpoints - Points de terminaison de service - - - Service Name - Nom du service - - - SQL Metrics - Métriques SQL - - - State - État - - - Status Icon - Icône d'état - - - Troubleshoot - Résoudre les problèmes - - - Unexpected error occurred: {0} - Erreur inattendue : {0} - - - View - Voir - - - View Details - Voir les détails - - - View Error Details - Voir les détails de l'erreur - - - View Kibana Logs {0} - Voir les journaux Kibana {0} - - - View Node Metrics {0} - Voir les métriques de nœud {0} - - - View SQL Metrics {0} - Voir les métriques SQL {0} - - - Cancel - Annuler - - - Cluster Management URL - URL de gestion de cluster - - - Connect to Controller - Se connecter au contrôleur - - - Endpoint '{0}' copied to clipboard - Point de terminaison '{0}' copié dans le Presse-papiers - - - Delete Mount - Supprimer le montage - - - Deleting HDFS Mount on path {0} - Suppression du montage HDFS sur le chemin {0} - - - Delete mount request submitted - Supprimer la demande de montage envoyée - - - Unexpected error retrieving BDC Endpoints: {0} - Erreur inattendue pendant la récupération des points de terminaison BDC : {0} - - - Password is required - Mot de passe obligatoire - - - Username is required - Nom d'utilisateur obligatoire - - - Cluster Connection - Connexion du cluster - - - Windows Authentication - Authentification Windows - - - Mount credentials for authentication to remote data source for reads - Informations d'identification de montage pour l'authentification auprès de la source de données distante pour les lectures - - - Credentials - Informations d'identification - - - Mount HDFS Folder - Monter le dossier HDFS - - - Bad formatting of credentials at {0} - Mise en forme incorrecte des informations d'identification sur {0} - - - Unknown error occurred during the mount process - Une erreur inconnue s'est produite pendant le processus de montage - - - Login to controller failed - La connexion au contrôleur a échoué - - - Login to controller failed: {0} - La connexion au contrôleur a échoué : {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - Chemin d'un nouveau répertoire (non existant) à associer au montage - - - HDFS Path - Chemin HDFS - - - Mount Configuration - Configuration du montage - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - URI de la source de données distante. 
Exemple pour ADLS : abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - URI distant - - - Mounting HDFS folder is complete - Le montage du dossier HDFS est terminé - - - Error mounting folder: {0} - Erreur de montage du dossier {0} - - - Mounting is likely to complete, check back later to verify - Le montage va probablement être effectué, revenez vérifier plus tard - - - Mounting HDFS folder on path {0} - Montage du dossier HDFS sur le chemin {0} - - - Mount creation has started - La création du montage a commencé - - - OK - OK - - - Password - Mot de passe - - - Refresh Mount - Actualiser le montage - - - Refreshing HDFS Mount on path {0} - Actualisation du montage HDFS sur le chemin {0} - - - Refresh mount request submitted - Demande d'actualisation du montage envoyée - - - Remember Password - Se souvenir du mot de passe - - - Authentication type - Type d'authentification - - - URL - URL - - - Username - Nom d'utilisateur - - - - - - - Unexpected error loading saved controllers: {0} - Erreur inattendue pendant chargement des contrôleurs enregistrés : {0} - - - - - - - Healthy - Sain - - - Unhealthy - Non sain - - - Application Proxy - Proxy d'application - - - Cluster Management Service - Service de gestion de cluster - - - Gateway to access HDFS files, Spark - Passerelle d'accès aux fichiers HDFS, à Spark - - - Metrics Dashboard - Tableau de bord des métriques - - - Log Search Dashboard - Tableau de bord de recherche dans les journaux - - - Proxy for running Spark statements, jobs, applications - Proxy pour exécuter les instructions, travaux, applications Spark - - - Management Proxy - Proxy de gestion - - - Management Proxy - Proxy de gestion - - - Spark Jobs Management and Monitoring Dashboard - Tableau de bord de gestion et de supervision de travaux Spark - - - SQL Server Master Instance Front-End - Front-end de l'instance maître SQL Server - - - HDFS File System Proxy - Proxy du système de fichiers HDFS - - - Spark Diagnostics and Monitoring Dashboard - Tableau de bord de diagnostic et de supervision Spark - - - Unexpected error retrieving BDC Endpoints: {0} - Erreur inattendue pendant la récupération des points de terminaison BDC : {0} - - - App - Application - - - Control - Contrôle - - - Gateway - Passerelle - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - Serveur SQL - - - Applying Upgrade - Application de la mise à niveau - - - Applying Managed Upgrade - Application de la mise à niveau gérée - - - Creating - Création - - - Creating Groups - Création de groupes - - - Creating Kerberos Delegation Setup - Création de la configuration de délégation Kerberos - - - Creating Resources - Création de ressources - - - Creating Secrets - Création de secrets - - - Deleted - Supprimé - - - Deleting - Suppression - - - Error - Erreur - - - Managed Upgrading - Mise à niveau gérée - - - Ready - Prêt - - - Rollback - Restaurer - - - Rollback Complete - Restauration effectuée - - - Rollback In Progress - Restauration en cours - - - Running - En cours d'exécution - - - Upgrade Paused - Mise à niveau suspendue - - - Upgrading - Mise à niveau - - - Waiting - En attente - - - Waiting For Deletion - En attente de suppression - - - Waiting For Groups - Attente de groupes - - - Waiting For Kerberos Delegation Setup - En attente de la configuration de la délégation Kerberos - - - Waiting For Resources - En attente de ressources - - - Waiting For Secrets - En attente des secrets - - - Waiting For Upgrade - En attente de mise à niveau - - - - - - - Big Data Cluster Dashboard - - Tableau de bord de cluster Big Data - 
- - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - Le module complémentaire du cluster Big Data est en cours de retrait et Azure Data Studio fonctionnalité de celui-ci sera supprimée dans une prochaine version. En savoir plus à ce sujet et au support à l’avenir [here](https://go.microsoft.com/fwlink/?linkid=2207340) - - - Controller endpoint information was not found - Informations de point de terminaison du contrôleur introuvables - - - Are you sure you want to remove '{0}'? - Voulez-vous vraiment supprimer '{0}' ? - - - No - Non - - - Yes - Oui - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - J'accepte {0}, {1} et {2}. - - - azdata License Terms - Termes du contrat de licence azdata - - - SQL Server License Terms - Termes du contrat de licence SQL Server - - - AKS cluster name - Nom du cluster AKS - - - Region - Région - - - Resource group name - Nom de groupe de ressources - - - Azure settings - Paramètres Azure - - - Subscription id - ID d'abonnement - - - Use my default Azure subscription - Utiliser mon abonnement Azure par défaut - - - VM count - Nombre de machines virtuelles - - - VM size - Taille de machine virtuelle - - - Cluster name - Nom de cluster - - - SQL Server Big Data Cluster settings - Paramètres de cluster Big Data SQL Server - - - Confirm password - Confirmer le mot de passe - - - Controller username - Nom d'utilisateur du contrôleur - - - Capacity for data (GB) - Capacité de données (Go) - - - Deployment target - Cible de déploiement - - - Existing Azure Kubernetes Service Cluster - Cluster Azure Kubernetes Service existant - - - Existing Azure Red Hat OpenShift cluster - Cluster Azure Red Hat OpenShift existant - - - Existing Kubernetes Cluster (kubeadm) - Cluster Kubernetes existant (kubeadm) - - - Existing OpenShift cluster - Cluster OpenShift existant - - - New Azure Kubernetes Service Cluster - Nouveau cluster Azure Kubernetes Service - - - Capacity for logs (GB) - Capacité des journaux (Go) - - - Password - Mot de passe - - - Storage class name - Nom de la classe de stockage - - - Big Data Cluster - Cluster Big Data - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - Ignorer les erreurs de vérification SSL sur les points de terminaison de cluster Big Data SQL Server de type HDFS, Spark et Contrôleur si la valeur est true - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) 
-[Connect Controller](command:bigDataClusters.command.connectController) - Aucun contrôleur de cluster Big Data SQL n'est inscrit. [En savoir plus](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) 
-[Connecter un contrôleur](command:bigDataClusters.command.connectController) - - - Loading controllers... - Chargement des contrôleurs...
- - - Connect to Existing Controller - Se connecter au contrôleur existant - - - Create New Controller - Créer un contrôleur - - - Delete Mount - Supprimer le montage - - - Manage - Gérer - - - Mount HDFS - Monter HDFS - - - Refresh - Actualiser - - - Refresh Mount - Actualiser le montage - - - Remove Controller - Supprimer le contrôleur - - - Support for managing SQL Server Big Data Clusters - Prise en charge de la gestion des clusters Big Data SQL Server - - - Microsoft Privacy Statement - Déclaration de confidentialité Microsoft - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - Le cluster Big Data SQL Server vous permet de déployer des clusters scalables de conteneurs SQL Server, Spark et HDFS s'exécutant sur Kubernetes - - - SQL Server Big Data Cluster - Cluster Big Data SQL Server - - - SQL Server Big Data Clusters - Clusters Big Data SQL Server - - - Version - Version - - - - \ No newline at end of file diff --git a/resources/xlf/it/big-data-cluster.it.xlf b/resources/xlf/it/big-data-cluster.it.xlf deleted file mode 100644 index 133a0ad631..0000000000 --- a/resources/xlf/it/big-data-cluster.it.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - Si è verificato un errore durante l'eliminazione del montaggio - - - Error retrieving BDC status from {0} - Si è verificato un errore durante il recupero dello stato del cluster Big Data da {0} - - - Error retrieving cluster config from {0} - Errore durante il recupero della configurazione del cluster da {0} - - - Error retrieving endpoints from {0} - Si è verificato un errore durante il recupero degli endpoint da {0} - - - Error creating mount - Si è verificato un errore durante la creazione del montaggio - - - Error refreshing mount - Si è verificato un errore durante l'aggiornamento del montaggio - - - Error getting mount status - Errore durante il recupero dello stato di montaggio - - - Error during authentication - Si è verificato un errore durante l'autenticazione - - - You do not have permission to log into this cluster using Windows Authentication - Non si è autorizzati ad accedere a questo cluster con Autenticazione di Windows - - - This cluster does not support Windows authentication - Questo cluster non supporta l'autenticazione di Windows - - - - - - - Add - Aggiungi - - - Add New Controller - Aggiungi nuovo controller - - - Basic - Di base - - - Big Data Cluster overview - Panoramica di Cluster Big Data - - - Cluster Details - Dettagli del cluster - - - Cluster Overview - Panoramica del cluster - - - Cluster Properties - Proprietà del cluster - - - Cluster State - Stato del cluster - - - Copy - Copia - - - Endpoint - Endpoint - - - Health Status - Stato integrità - - - Health Status Details - Dettagli sullo stato integrità - - - Instance - Istanza - - - Last Updated : {0} - Ultimo aggiornamento: {0} - - - Loading cluster state completed - Caricamento dello stato del cluster completato - - - Loading health status completed - Caricamento dello stato integrità completato - - - Logs - Log - - - Metrics and Logs - Metriche e log - - - The dashboard requires a connection. Please click retry to enter your credentials. - Il dashboard richiede una connessione. Fare clic su Riprova per immettere le credenziali. 
- - - Node Metrics - Metriche del nodo - - - N/A - N/D - - - Refresh - Aggiorna - - - Service - Servizio - - - Service Endpoints - Endpoint servizio - - - Service Name - Nome servizio - - - SQL Metrics - Metriche di SQL - - - State - Stato - - - Status Icon - Icona stato - - - Troubleshoot - Risoluzione dei problemi - - - Unexpected error occurred: {0} - Si è verificato un errore imprevisto: {0} - - - View - Visualizza - - - View Details - Visualizza dettagli - - - View Error Details - Visualizza dettagli dell'errore - - - View Kibana Logs {0} - Visualizza log di Kibana {0} - - - View Node Metrics {0} - Visualizza metriche del nodo {0} - - - View SQL Metrics {0} - Visualizza metriche di SQL {0} - - - Cancel - Annulla - - - Cluster Management URL - URL di gestione cluster - - - Connect to Controller - Connetti al controller - - - Endpoint '{0}' copied to clipboard - L'endpoint '{0}' è stato copiato negli Appunti - - - Delete Mount - Elimina montaggio - - - Deleting HDFS Mount on path {0} - Eliminazione del montaggio HDFS nel percorso {0} - - - Delete mount request submitted - Richiesta di eliminazione montaggio inviata - - - Unexpected error retrieving BDC Endpoints: {0} - Si è verificato un errore imprevisto durante il recupero degli endpoint BDC: {0} - - - Password is required - La password è obbligatoria - - - Username is required - Il nome utente è obbligatorio - - - Cluster Connection - Connessione cluster - - - Windows Authentication - Autenticazione di Windows - - - Mount credentials for authentication to remote data source for reads - Credenziali di montaggio per l'autenticazione all'origine dati remota per operazioni di lettura - - - Credentials - Credenziali - - - Mount HDFS Folder - Monta cartella HDFS - - - Bad formatting of credentials at {0} - Formattazione non valida delle credenziali alla posizione {0} - - - Unknown error occurred during the mount process - Si è verificato un errore sconosciuto durante il processo di montaggio - - - Login to controller failed - Accesso al controller non riuscito - - - Login to controller failed: {0} - Accesso al controller non riuscito: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - Percorso di una nuova directory non esistente da associare al montaggio - - - HDFS Path - Percorso HDFS - - - Mount Configuration - Configurazione montaggio - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - URI dell'origine dati remota. Esempio per ADLS: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - URI del repository remoto - - - Mounting HDFS folder is complete - Il montaggio della cartella HDFS è stato completato - - - Error mounting folder: {0} - Si è verificato un errore durante il montaggio della cartella: {0} - - - Mounting is likely to complete, check back later to verify - Il montaggio verrà probabilmente completato. Per verificare, controllare più tardi 
- - - Mounting HDFS folder on path {0} - Montaggio della cartella HDFS nel percorso {0} - - - Mount creation has started - La creazione del montaggio è stata avviata - - - OK - OK - - - Password - Password - - - Refresh Mount - Aggiorna montaggio - - - Refreshing HDFS Mount on path {0} - Aggiornamento del montaggio HDFS nel percorso {0} - - - Refresh mount request submitted - Richiesta di aggiornamento montaggio inviata - - - Remember Password - Memorizza password - - - Authentication type - Tipo di autenticazione - - - URL - URL - - - Username - Nome utente - - - - - - - Unexpected error loading saved controllers: {0} - Errore imprevisto durante il caricamento dei controller salvati: {0} - - - - - - - Healthy - Integri - - - Unhealthy - Non integri - - - Application Proxy - Proxy dell'applicazione - - - Cluster Management Service - Servizio di gestione cluster - - - Gateway to access HDFS files, Spark - Gateway per l'accesso ai file HDFS, Spark - - - Metrics Dashboard - Dashboard di metriche - - - Log Search Dashboard - Dashboard di ricerca log - - - Proxy for running Spark statements, jobs, applications - Proxy per l'esecuzione di istruzioni, processi e applicazioni Spark - - - Management Proxy - Proxy di gestione - - - Management Proxy - Proxy di gestione - - - Spark Jobs Management and Monitoring Dashboard - Dashboard di gestione processi e monitoraggio di Spark - - - SQL Server Master Instance Front-End - Front-end dell'istanza master di SQL Server - - - HDFS File System Proxy - Proxy del file system HDFS - - - Spark Diagnostics and Monitoring Dashboard - Dashboard di diagnostica e monitoraggio di Spark - - - Unexpected error retrieving BDC Endpoints: {0} - Si è verificato un errore imprevisto durante il recupero degli endpoint BDC: {0} - - - App - App - - - Control - Controllo - - - Gateway - Gateway - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - Applicazione dell'aggiornamento - - - Applying Managed Upgrade - Applicazione dell'aggiornamento gestito - - - Creating - In fase di creazione - - - Creating Groups - Creazione dei gruppi - - - Creating Kerberos Delegation Setup - Creazione della configurazione per la delega Kerberos - - - Creating Resources - Creazione delle risorse - - - Creating Secrets - Creazione dei segreti - - - Deleted - Eliminato - - - Deleting - In fase di eliminazione - - - Error - Errore - - - Managed Upgrading - Aggiornamento gestito - - - Ready - Pronto - - - Rollback - Rollback - - - Rollback Complete - Rollback completato - - - Rollback In Progress - Rollback in corso - - - Running - In esecuzione - - - Upgrade Paused - Aggiornamento sospeso - - - Upgrading - In fase di aggiornamento - - - Waiting - In attesa - - - Waiting For Deletion - In attesa dell'eliminazione - - - Waiting For Groups - In attesa dei gruppi - - - Waiting For Kerberos Delegation Setup - In attesa della configurazione per la delega Kerberos - - - Waiting For Resources - In attesa delle risorse - - - Waiting For Secrets - In attesa dei segreti - - - Waiting For Upgrade - In attesa dell'aggiornamento - - - - - - - Big Data Cluster Dashboard - - Dashboard di Cluster Big Data - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340).
- Il componente aggiuntivo Cluster Big Data verrà ritirato e Azure Data Studio funzionalità per il cluster verrà rimossa in una versione futura. Altre informazioni su questo aspetto e supporto in futuro [qui](https://go.microsoft.com/fwlink/?linkid=2207340). - - - Controller endpoint information was not found - Le informazioni sull'endpoint del controller non sono state trovate - - - Are you sure you want to remove '{0}'? - Rimuovere '{0}'? - - - No - No - - - Yes - - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - Accetto {0}, {1} e {2}. - - - azdata License Terms - Condizioni di licenza di azdata - - - SQL Server License Terms - Condizioni di licenza di SQL Server - - - AKS cluster name - Nome del cluster del servizio Azure Kubernetes - - - Region - Area - - - Resource group name - Nome del gruppo di risorse - - - Azure settings - Impostazioni di Azure - - - Subscription id - ID sottoscrizione - - - Use my default Azure subscription - Usa la sottoscrizione di Azure predefinita personale - - - VM count - Numero di macchine virtuali - - - VM size - Dimensioni della macchina virtuale - - - Cluster name - Nome del cluster - - - SQL Server Big Data Cluster settings - Impostazioni del cluster Big Data di SQL Server - - - Confirm password - Conferma password - - - Controller username - Nome utente del controller - - - Capacity for data (GB) - Capacità per i dati (GB) - - - Deployment target - Destinazione di distribuzione - - - Existing Azure Kubernetes Service Cluster - Cluster esistente del servizio Azure Kubernetes - - - Existing Azure Red Hat OpenShift cluster - Cluster Azure Red Hat OpenShift esistente - - - Existing Kubernetes Cluster (kubeadm) - Cluster Kubernetes esistente (kubeadm) - - - Existing OpenShift cluster - Cluster OpenShift esistente - - - New Azure Kubernetes Service Cluster - Nuovo cluster del servizio Azure Kubernetes - - - Capacity for logs (GB) - Capacità per i log (GB) - - - Password - Password - - - Storage class name - Nome della classe di archiviazione - - - Big Data Cluster - Cluster Big Data - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - Se è true, ignora gli errori di verifica SSL in endpoint del cluster Big Data di SQL Server come HDFS, Spark e Controller - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - Non sono stati registrati controller del cluster Big Data di SQL. [Altre informazioni](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connetti controller](command:bigDataClusters.command.connectController) - - - Loading controllers... - Caricamento controller... 
- - - Connect to Existing Controller - Connetti al controller esistente - - - Create New Controller - Crea nuovo controller - - - Delete Mount - Elimina montaggio - - - Manage - Gestisci - - - Mount HDFS - Monta HDFS - - - Refresh - Aggiorna - - - Refresh Mount - Aggiorna montaggio - - - Remove Controller - Rimuovi controller - - - Support for managing SQL Server Big Data Clusters - Supporto per la gestione di cluster Big Data di SQL Server - - - Microsoft Privacy Statement - Informativa sulla privacy di Microsoft - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - Il cluster Big Data di SQL Server consente di distribuire cluster scalabili di contenitori SQL Server, Spark e HDFS in esecuzione in Kubernetes - - - SQL Server Big Data Cluster - Cluster Big Data di SQL Server - - - SQL Server Big Data Clusters - Cluster Big Data di SQL Server - - - Version - Versione - - - - \ No newline at end of file diff --git a/resources/xlf/ja/big-data-cluster.ja.xlf b/resources/xlf/ja/big-data-cluster.ja.xlf deleted file mode 100644 index 695fb160d6..0000000000 --- a/resources/xlf/ja/big-data-cluster.ja.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - マウントの削除エラー - - - Error retrieving BDC status from {0} - {0} から BDC ステータスを取得する操作でエラーが発生しました - - - Error retrieving cluster config from {0} - {0} からのクラスター構成の取得でエラーが発生しました - - - Error retrieving endpoints from {0} - {0} からのエンドポイントの取得でエラーが発生しました - - - Error creating mount - マウントの作成でエラーが発生しました - - - Error refreshing mount - マウントの更新エラー - - - Error getting mount status - マウント状態の取得エラー - - - Error during authentication - 認証中のエラー - - - You do not have permission to log into this cluster using Windows Authentication - Windows 認証を使用してこのクラスターにログインするアクセス許可がありません - - - This cluster does not support Windows authentication - このクラスターは Windows 認証をサポートしていません - - - - - - - Add - 追加 - - - Add New Controller - 新しいコントローラーの追加 - - - Basic - 基本 - - - Big Data Cluster overview - ビッグ データ クラスターの概要 - - - Cluster Details - クラスターの詳細 - - - Cluster Overview - クラスターの概要 - - - Cluster Properties - クラスターのプロパティ - - - Cluster State - クラスターの状態 - - - Copy - コピー - - - Endpoint - エンドポイント - - - Health Status - 正常性状態 - - - Health Status Details - 正常性状態の詳細 - - - Instance - インスタンス - - - Last Updated : {0} - 最終更新日: {0} - - - Loading cluster state completed - クラスター状態の読み込みが完了しました - - - Loading health status completed - 正常性状態の読み込みが完了しました - - - Logs - ログ - - - Metrics and Logs - メトリックとログ - - - The dashboard requires a connection. Please click retry to enter your credentials. 
- ダッシュボードには接続が必要です。[再試行] をクリックして資格情報を入力してください。 - - - Node Metrics - ノード メトリック - - - N/A - 該当なし - - - Refresh - 最新の情報に更新 - - - Service - サービス - - - Service Endpoints - サービス エンドポイント - - - Service Name - サービス名 - - - SQL Metrics - SQL メトリック - - - State - 状態 - - - Status Icon - 状態アイコン - - - Troubleshoot - トラブルシューティング - - - Unexpected error occurred: {0} - 予期しないエラーが発生しました: {0} - - - View - 表示 - - - View Details - 詳細の表示 - - - View Error Details - エラーの詳細の表示 - - - View Kibana Logs {0} - Kibana ログ {0} の表示 - - - View Node Metrics {0} - ノード メトリック {0} の表示 - - - View SQL Metrics {0} - SQL メトリック {0} の表示 - - - Cancel - キャンセル - - - Cluster Management URL - クラスター管理の URL - - - Connect to Controller - コントローラーに接続する - - - Endpoint '{0}' copied to clipboard - エンドポイント '{0}' がクリップボードにコピーされました - - - Delete Mount - マウントの削除 - - - Deleting HDFS Mount on path {0} - パス {0} 上の HDFS マウントを削除しています - - - Delete mount request submitted - マウントの削除要求が送信されました - - - Unexpected error retrieving BDC Endpoints: {0} - BDC エンドポイントの取得中に予期しないエラーが発生しました: {0} - - - Password is required - パスワードが必須です - - - Username is required - ユーザー名が必須です - - - Cluster Connection - クラスター接続 - - - Windows Authentication - Windows 認証 - - - Mount credentials for authentication to remote data source for reads - 認証用の資格情報をリモート データ ソースにマウントして読み取りを行う - - - Credentials - 資格情報 - - - Mount HDFS Folder - HDFS フォルダーのマウント - - - Bad formatting of credentials at {0} - {0} での資格情報の書式設定が正しくありません - - - Unknown error occurred during the mount process - マウント プロセス中に不明なエラーが発生しました - - - Login to controller failed - コントローラーへのログインに失敗しました - - - Login to controller failed: {0} - コントローラーへのログインに失敗しました: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - マウントに関連付ける新しい (存在しない) ディレクトリへのパス - - - HDFS Path - HDFS パス - - - Mount Configuration - マウント構成 - - - The URI to the remote data source. 
Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - リモート データ ソースへの URI。ADLS の例: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - リモート URI - - - Mounting HDFS folder is complete - HDFS フォルダーのマウントが完了しました - - - Error mounting folder: {0} - フォルダーのマウントでエラーが発生しました: {0} - - - Mounting is likely to complete, check back later to verify - マウントが完了しようとしています。後でご確認ください - - - Mounting HDFS folder on path {0} - パス {0} 上に HDFS フォルダーをマウントしています - - - Mount creation has started - マウントの作成が開始されました - - - OK - OK - - - Password - パスワード - - - Refresh Mount - マウントの更新 - - - Refreshing HDFS Mount on path {0} - パス {0} 上の HDFS マウントを更新しています - - - Refresh mount request submitted - マウントの更新要求が送信されました - - - Remember Password - パスワードを記憶する - - - Authentication type - 認証の種類 - - - URL - URL - - - Username - ユーザー名 - - - - - - - Unexpected error loading saved controllers: {0} - 保存されたコントローラーの読み込み中に予期しないエラーが発生しました: {0} - - - - - - - Healthy - 正常 - - - Unhealthy - 異常 - - - Application Proxy - アプリケーション プロキシ - - - Cluster Management Service - クラスター管理サービス - - - Gateway to access HDFS files, Spark - HDFS ファイルにアクセスするためのゲートウェイ、Spark - - - Metrics Dashboard - メトリック ダッシュボード - - - Log Search Dashboard - ログ検索ダッシュボード - - - Proxy for running Spark statements, jobs, applications - Spark ステートメント、ジョブ、アプリケーションを実行するためのプロキシ - - - Management Proxy - 管理プロキシ - - - Management Proxy - 管理プロキシ - - - Spark Jobs Management and Monitoring Dashboard - Spark ジョブの管理と監視ダッシュボード - - - SQL Server Master Instance Front-End - SQL Server マスター インスタンス フロントエンド - - - HDFS File System Proxy - HDFS ファイル システム プロキシ - - - Spark Diagnostics and Monitoring Dashboard - Spark 診断と監視ダッシュボード - - - Unexpected error retrieving BDC Endpoints: {0} - BDC エンドポイントの取得中に予期しないエラーが発生しました: {0} - - - App - アプリ - - - Control - コントロール - - - Gateway - ゲートウェイ - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - アップグレードの適用中 - - - Applying Managed Upgrade - マネージド アップグレードの適用中 - - - Creating - 作成中 - - - Creating Groups - グループの作成中 - - - Creating Kerberos Delegation Setup - Kerberos 委任セットアップの作成中 - - - Creating Resources - リソースの作成中 - - - Creating Secrets - シークレットの作成中 - - - Deleted - 削除済み - - - Deleting - 削除中 - - - Error - エラー - - - Managed Upgrading - マネージド アップグレード中 - - - Ready - 準備完了 - - - Rollback - ロールバック - - - Rollback Complete - ロールバックの完了 - - - Rollback In Progress - ロールバックが進行中 - - - Running - 実行中 - - - Upgrade Paused - アップグレードが一時停止しました - - - Upgrading - アップグレード中 - - - Waiting - 待機中 - - - Waiting For Deletion - 削除の待機中 - - - Waiting For Groups - グループの待機中 - - - Waiting For Kerberos Delegation Setup - Kerberos 委任のセットアップの待機中 - - - Waiting For Resources - リソースの待機中 - - - Waiting For Secrets - シークレットの待機中 - - - Waiting For Upgrade - アップグレードの待機中 - - - - - - - Big Data Cluster Dashboard - - ビッグ データ クラスター ダッシュボード - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - ビッグ データ クラスター アドオンは廃止され、そのための Azure Data Studio 機能は今後のリリースで削除されます。詳細と今後のサポートについては、[こちら](https://go.microsoft.com/fwlink/?linkid=2207340) を参照してください。 - - - Controller endpoint information was not found - コントローラー エンドポイント情報が見つかりませんでした - - - Are you sure you want to remove '{0}'? - '{0}' を削除してよろしいですか? - - - No - いいえ - - - Yes - はい - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. 
- {0}、{1}、{2} に同意します。 - - - azdata License Terms - azdata ライセンス条項 - - - SQL Server License Terms - SQL Server ライセンス条項 - - - AKS cluster name - AKS クラスター名 - - - Region - リージョン - - - Resource group name - リソース グループ名 - - - Azure settings - Azure の設定 - - - Subscription id - サブスクリプション ID - - - Use my default Azure subscription - 既定の Azure サブスクリプションを使用する - - - VM count - VM 数 - - - VM size - VM サイズ - - - Cluster name - クラスター名 - - - SQL Server Big Data Cluster settings - SQL Server ビッグ データ クラスターの設定 - - - Confirm password - パスワードの確認 - - - Controller username - コントローラーのユーザー名 - - - Capacity for data (GB) - データの容量 (GB) - - - Deployment target - デプロイ ターゲット - - - Existing Azure Kubernetes Service Cluster - 既存の Azure Kubernetes Service クラスター - - - Existing Azure Red Hat OpenShift cluster - 既存の Azure Red Hat OpenShift クラスター - - - Existing Kubernetes Cluster (kubeadm) - 既存の Kubernetes クラスター (kubeadm) - - - Existing OpenShift cluster - 既存の OpenShift クラスター - - - New Azure Kubernetes Service Cluster - 新しい Azure Kubernetes Service クラスター - - - Capacity for logs (GB) - ログの容量 (GB) - - - Password - パスワード - - - Storage class name - ストレージ クラス名 - - - Big Data Cluster - ビッグ データ クラスター - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - HDFS、Spark、コントローラーなどの SQL Server ビッグ データ クラスター エンドポイントに対する SSL 検証エラーを無視する (true の場合) - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - - - Loading controllers... - コントローラーを読み込んでいます... - - - Connect to Existing Controller - 既存のコントローラーに接続する - - - Create New Controller - 新しいコントローラーの作成 - - - Delete Mount - マウントの削除 - - - Manage - 管理 - - - Mount HDFS - HDFS のマウント - - - Refresh - 最新の情報に更新 - - - Refresh Mount - マウントの更新 - - - Remove Controller - コントローラーの削除 - - - Support for managing SQL Server Big Data Clusters - SQL Server ビッグ データ クラスターの管理をサポート - - - Microsoft Privacy Statement - Microsoft プライバシー ステートメント - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - SQL Server ビッグ データ クラスターを使用すると、Kubernetes で実行されている SQL Server、Spark、および HDFS のコンテナーのスケーラブルなクラスターをデプロイできます。 - - - SQL Server Big Data Cluster - SQL Server ビッグ データ クラスター - - - SQL Server Big Data Clusters - SQL Server ビッグ データ クラスター - - - Version - バージョン - - - - \ No newline at end of file diff --git a/resources/xlf/ko/big-data-cluster.ko.xlf b/resources/xlf/ko/big-data-cluster.ko.xlf deleted file mode 100644 index d4a2e1da1e..0000000000 --- a/resources/xlf/ko/big-data-cluster.ko.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - 탑재를 삭제하는 중 오류 발생 - - - Error retrieving BDC status from {0} - {0}에서 BDC 상태 검색 중 오류 발생 - - - Error retrieving cluster config from {0} - {0}에서 클러스터 구성을 검색하는 동안 오류 발생 - - - Error retrieving endpoints from {0} - {0}에서 엔드포인트 검색 중 오류 발생 - - - Error creating mount - 탑재를 만드는 중 오류 발생 - - - Error refreshing mount - 탑재를 새로 고치는 중 오류 발생 - - - Error getting mount status - 탑재 상태를 가져오는 동안 오류 발생 - - - Error during authentication - 인증 오류 - - - You do not have permission to log into this cluster using Windows Authentication - Windows 인증을 사용하여 이 클러스터에 로그인할 수 있는 권한이 없습니다. 
- - - This cluster does not support Windows authentication - 이 클러스터는 Windows 인증을 지원하지 않습니다. - - - - - - - Add - 추가 - - - Add New Controller - 새 컨트롤러 추가 - - - Basic - 기본 - - - Big Data Cluster overview - 빅 데이터 클러스터 개요 - - - Cluster Details - 클러스터 세부 정보 - - - Cluster Overview - 클러스터 개요 - - - Cluster Properties - 클러스터 속성 - - - Cluster State - 클러스터 상태 - - - Copy - 복사 - - - Endpoint - 엔드포인트 - - - Health Status - 상태 - - - Health Status Details - 상태 세부 정보 - - - Instance - 인스턴스 - - - Last Updated : {0} - 마지막으로 업데이트한 날짜: {0} - - - Loading cluster state completed - 클러스터 상태 로드 완료 - - - Loading health status completed - 상태 로드 완료 - - - Logs - 로그 - - - Metrics and Logs - 메트릭 및 로그 - - - The dashboard requires a connection. Please click retry to enter your credentials. - 대시보드에 연결이 필요합니다. 자격 증명을 입력하려면 다시 시도를 클릭하세요. - - - Node Metrics - 노드 메트릭 - - - N/A - 해당 없음 - - - Refresh - 새로 고침 - - - Service - 서비스 - - - Service Endpoints - 서비스 엔드포인트 - - - Service Name - 서비스 이름 - - - SQL Metrics - SQL 메트릭 - - - State - 상태 - - - Status Icon - 상태 아이콘 - - - Troubleshoot - 문제 해결 - - - Unexpected error occurred: {0} - 예기치 않은 오류가 발생했습니다. {0} - - - View - 보기 - - - View Details - 세부 정보 보기 - - - View Error Details - 오류 세부 정보 보기 - - - View Kibana Logs {0} - Kibana 로그 {0} 보기 - - - View Node Metrics {0} - 노드 메트릭 {0} 보기 - - - View SQL Metrics {0} - SQL 메트릭 {0} 보기 - - - Cancel - 취소 - - - Cluster Management URL - 클러스터 관리 URL - - - Connect to Controller - 컨트롤러에 연결 - - - Endpoint '{0}' copied to clipboard - 엔드포인트 '{0}'이(가) 클립보드에 복사됨 - - - Delete Mount - 탑재 삭제 - - - Deleting HDFS Mount on path {0} - 경로 {0}에서 HDFS 탑재를 삭제하는 중 - - - Delete mount request submitted - 탑재 삭제 요청이 제출됨 - - - Unexpected error retrieving BDC Endpoints: {0} - BDC 엔드포인트를 검색하는 동안 예기치 않은 오류 발생: {0} - - - Password is required - 암호는 필수입니다. - - - Username is required - 사용자 이름이 필요합니다. - - - Cluster Connection - 클러스터 연결 - - - Windows Authentication - Windows 인증 - - - Mount credentials for authentication to remote data source for reads - 읽기 위해 원격 데이터 원본에 인증용 자격 증명 탑재 - - - Credentials - 자격 증명 - - - Mount HDFS Folder - HDFS 폴더 탑재 - - - Bad formatting of credentials at {0} - {0}에 있는 자격 증명 형식이 잘못됨 - - - Unknown error occurred during the mount process - 탑재 프로세스 중에 알 수 없는 오류가 발생했습니다. - - - Login to controller failed - 컨트롤러에 로그인하지 못함 - - - Login to controller failed: {0} - 컨트롤러에 로그인하지 못함: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - 탑재와 연결하려는 새(기존 항목 아님) 디렉터리의 경로 - - - HDFS Path - HDFS 경로 - - - Mount Configuration - 탑재 구성 - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - 원격 데이터 원본에 대한 URI입니다. ADLS의 예: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - 원격 URI - - - Mounting HDFS folder is complete - HDFS 폴더 탑재가 완료되었습니다. - - - Error mounting folder: {0} - 폴더 탑재 오류: {0} - - - Mounting is likely to complete, check back later to verify - 탑재가 완료될 수 있습니다. 나중에 다시 확인하세요. - - - Mounting HDFS folder on path {0} - 경로 {0}에 HDFS 폴더를 탑재하는 중 - - - Mount creation has started - 탑재 만들기를 시작했습니다. 
- - - OK - 확인 - - - Password - 암호 - - - Refresh Mount - 탑재 새로 고침 - - - Refreshing HDFS Mount on path {0} - 경로 {0}에서 HDFS 탑재를 새로 고치는 중 - - - Refresh mount request submitted - 탑재 새로 고침 요청이 제출됨 - - - Remember Password - 암호 저장 - - - Authentication type - 인증 유형 - - - URL - URL - - - Username - 사용자 이름 - - - - - - - Unexpected error loading saved controllers: {0} - 저장된 컨트롤러를 로드하는 동안 예기치 않은 오류 발생: {0} - - - - - - - Healthy - 정상 - - - Unhealthy - 비정상 - - - Application Proxy - 애플리케이션 프록시 - - - Cluster Management Service - 클러스터 관리 서비스 - - - Gateway to access HDFS files, Spark - HDFS 파일에 액세스하기 위한 게이트웨이, Spark - - - Metrics Dashboard - 메트릭 대시보드 - - - Log Search Dashboard - 로그 검색 대시보드 - - - Proxy for running Spark statements, jobs, applications - Spark 문, 작업, 애플리케이션 실행을 위한 프록시 - - - Management Proxy - 관리 프록시 - - - Management Proxy - 관리 프록시 - - - Spark Jobs Management and Monitoring Dashboard - Spark 작업 관리 및 모니터링 대시보드 - - - SQL Server Master Instance Front-End - SQL Server 마스터 인스턴스 프런트 엔드 - - - HDFS File System Proxy - HDFS 파일 시스템 프록시 - - - Spark Diagnostics and Monitoring Dashboard - Spark 진단 및 모니터링 대시보드 - - - Unexpected error retrieving BDC Endpoints: {0} - BDC 엔드포인트를 검색하는 동안 예기치 않은 오류 발생: {0} - - - App - - - - Control - 컨트롤 - - - Gateway - 게이트웨이 - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - 업그레이드를 적용하는 중 - - - Applying Managed Upgrade - 관리형 업그레이드를 적용하는 중 - - - Creating - 만드는 중 - - - Creating Groups - 그룹을 만드는 중 - - - Creating Kerberos Delegation Setup - Kerberos 위임 설정을 만드는 중 - - - Creating Resources - 리소스를 만드는 중 - - - Creating Secrets - 비밀을 만드는 중 - - - Deleted - 삭제 - - - Deleting - 삭제하는 중 - - - Error - 오류 - - - Managed Upgrading - 관리형 업그레이드 - - - Ready - 준비 완료 - - - Rollback - 롤백 - - - Rollback Complete - 롤백 완료 - - - Rollback In Progress - 롤백 진행 중 - - - Running - 실행하는 중 - - - Upgrade Paused - 업그레이드를 일시 중지함 - - - Upgrading - 업그레이드하는 중 - - - Waiting - 기다리는 중 - - - Waiting For Deletion - 삭제를 대기하는 중 - - - Waiting For Groups - 그룹을 대기하는 중 - - - Waiting For Kerberos Delegation Setup - Kerberos 위임 설정을 대기하는 중 - - - Waiting For Resources - 리소스를 대기하는 중 - - - Waiting For Secrets - 비밀을 기다리는 중 - - - Waiting For Upgrade - 업그레이드를 대기하는 중 - - - - - - - Big Data Cluster Dashboard - - 빅 데이터 클러스터 대시보드 - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - 빅 데이터 클러스터 추가 기능은 사용 중지되며 이에 대한 Azure Data Studio 기능은 향후 릴리스에서 제거될 예정입니다. [여기](https://go.microsoft.com/fwlink/?linkid=2207340)에서 이에 대한 자세한 내용과 향후 지원을 확인하세요. - - - Controller endpoint information was not found - 컨트롤러 엔드포인트 정보를 찾을 수 없음 - - - Are you sure you want to remove '{0}'? - '{0}'을(를) 제거하시겠습니까? - - - No - 아니요 - - - Yes - - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - {0}, {1} 및 {2}에 동의합니다. 
- - - azdata License Terms - azdata 사용 조건 - - - SQL Server License Terms - SQL Server 사용 조건 - - - AKS cluster name - AKS 클러스터 이름 - - - Region - 지역 - - - Resource group name - 리소스 그룹 이름 - - - Azure settings - Azure 설정 - - - Subscription id - 구독 ID - - - Use my default Azure subscription - 내 기본 Azure 구독 사용 - - - VM count - VM 수 - - - VM size - VM 크기 - - - Cluster name - 클러스터 이름 - - - SQL Server Big Data Cluster settings - SQL Server 빅 데이터 클러스터 설정 - - - Confirm password - 암호 확인 - - - Controller username - 컨트롤러 사용자 이름 - - - Capacity for data (GB) - 데이터 용량(GB) - - - Deployment target - 배포 대상 - - - Existing Azure Kubernetes Service Cluster - 기존 Azure Kubernetes Service 클러스터 - - - Existing Azure Red Hat OpenShift cluster - 기존 Azure Red Hat OpenShift 클러스터 - - - Existing Kubernetes Cluster (kubeadm) - 기존 Kubernetes 클러스터(kubeadm) - - - Existing OpenShift cluster - 기존 OpenShift 클러스터 - - - New Azure Kubernetes Service Cluster - 새 Azure Kubernetes Service 클러스터 - - - Capacity for logs (GB) - 로그 용량(GB) - - - Password - 암호 - - - Storage class name - 스토리지 클래스 이름 - - - Big Data Cluster - 빅 데이터 클러스터 - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - True인 경우 HDFS, Spark 및 Controller와 같은 SQL Server 빅 데이터 클러스터 엔드포인트를 대상으로 SSL 확인 오류 무시 - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - 등록된 SQL 빅 데이터 클러스터 컨트롤러가 없습니다. [자세한 정보](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[컨트롤러 연결](command:bigDataClusters.command.connectController) - - - Loading controllers... - 컨트롤러를 로드하는 중... - - - Connect to Existing Controller - 기존 컨트롤러에 연결 - - - Create New Controller - 새 컨트롤러 만들기 - - - Delete Mount - 탑재 삭제 - - - Manage - 관리 - - - Mount HDFS - HDFS 탑재 - - - Refresh - 새로 고침 - - - Refresh Mount - 탑재 새로 고침 - - - Remove Controller - 컨트롤러 제거 - - - Support for managing SQL Server Big Data Clusters - SQL Server 빅 데이터 클러스터 관리 지원 - - - Microsoft Privacy Statement - Microsoft 개인정보처리방침 - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - SQL Server 빅 데이터 클러스터를 사용하면 Kubernetes에서 실행되는 SQL Server, Spark 및 HDFS 컨테이너의 확장 가능한 클러스터를 배포할 수 있습니다. 
- - - SQL Server Big Data Cluster - SQL Server 빅 데이터 클러스터 - - - SQL Server Big Data Clusters - SQL Server 빅 데이터 클러스터 - - - Version - 버전 - - - - \ No newline at end of file diff --git a/resources/xlf/pt-br/big-data-cluster.pt-BR.xlf b/resources/xlf/pt-br/big-data-cluster.pt-BR.xlf deleted file mode 100644 index 25f73cebb6..0000000000 --- a/resources/xlf/pt-br/big-data-cluster.pt-BR.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - Erro ao excluir a montagem - - - Error retrieving BDC status from {0} - Erro ao recuperar o status do BDC de {0} - - - Error retrieving cluster config from {0} - Erro ao recuperar a configuração do cluster do {0} - - - Error retrieving endpoints from {0} - Erro ao recuperar os pontos de extremidade de {0} - - - Error creating mount - Erro ao criar a montagem - - - Error refreshing mount - Erro ao atualizar a montagem - - - Error getting mount status - Erro ao obter o status de montagem - - - Error during authentication - Erro durante a autenticação - - - You do not have permission to log into this cluster using Windows Authentication - Você não tem permissão para fazer logon nesse cluster usando a autenticação do Windows - - - This cluster does not support Windows authentication - Este cluster não dá suporte à autenticação do Windows - - - - - - - Add - Adicionar - - - Add New Controller - Adicionar Novo Controlador - - - Basic - Básico - - - Big Data Cluster overview - Visão geral do cluster de Big Data - - - Cluster Details - Detalhes do Cluster - - - Cluster Overview - Visão Geral do Cluster - - - Cluster Properties - Propriedades do Cluster - - - Cluster State - Estado do Cluster - - - Copy - Copiar - - - Endpoint - Ponto de Extremidade - - - Health Status - Status da Integridade - - - Health Status Details - Detalhes do Status da Integridade - - - Instance - Instância - - - Last Updated : {0} - Última Atualização: {0} - - - Loading cluster state completed - Carregamento do estado do cluster concluído - - - Loading health status completed - Carregamento do status da integridade concluído - - - Logs - Logs - - - Metrics and Logs - Métricas e Logs - - - The dashboard requires a connection. Please click retry to enter your credentials. - O painel requer uma conexão. Clique em tentar novamente para inserir suas credenciais. 
- - - Node Metrics - Métricas do Node - - - N/A - N/D - - - Refresh - Atualizar - - - Service - Serviço - - - Service Endpoints - Pontos de Extremidade de Serviço - - - Service Name - Nome do Serviço - - - SQL Metrics - Métricas do SQL - - - State - Estado - - - Status Icon - Ícone de Status - - - Troubleshoot - Solucionar problemas - - - Unexpected error occurred: {0} - Erro inesperado: {0} - - - View - Exibir - - - View Details - Exibir Detalhes - - - View Error Details - Exibir Detalhes do Erro - - - View Kibana Logs {0} - Exibir os Logs do Kibana {0} - - - View Node Metrics {0} - Exibir Métricas do Node {0} - - - View SQL Metrics {0} - Exibir Métricas do SQL {0} - - - Cancel - Cancelar - - - Cluster Management URL - URL de Gerenciamento de Cluster - - - Connect to Controller - Conectar ao Controlador - - - Endpoint '{0}' copied to clipboard - Ponto de extremidade '{0}' copiado para a área de transferência - - - Delete Mount - Excluir Montagem - - - Deleting HDFS Mount on path {0} - Excluindo a Montagem do HDFS no caminho {0} - - - Delete mount request submitted - Solicitação de exclusão de montagem enviada - - - Unexpected error retrieving BDC Endpoints: {0} - Erro inesperado ao recuperar os pontos de Extremidade do BDC: {0} - - - Password is required - A senha é obrigatória - - - Username is required - O nome de usuário é obrigatório - - - Cluster Connection - Conexão de Cluster - - - Windows Authentication - Autenticação do Windows - - - Mount credentials for authentication to remote data source for reads - Montar as credenciais para autenticação na fonte de dados remota para leituras - - - Credentials - Credenciais - - - Mount HDFS Folder - Montar a Pasta do HDFS - - - Bad formatting of credentials at {0} - Formatação incorreta de credenciais em {0} - - - Unknown error occurred during the mount process - Erro desconhecido durante o processo de montagem - - - Login to controller failed - Falha ao entrar no controlador - - - Login to controller failed: {0} - Falha ao entrar no controlador: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - Caminho para um novo diretório (não existente) que você deseja associar com a montagem - - - HDFS Path - Caminho do HDFS - - - Mount Configuration - Configuração da Montagem - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - O URI da fonte de dados remota. Exemplo para o ADLS: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - URI remoto - - - Mounting HDFS folder is complete - A montagem da pasta do HDFS está concluída - - - Error mounting folder: {0} - Erro na montagem da pasta: {0} - - - Mounting is likely to complete, check back later to verify - A montagem provavelmente será concluída. 
Volte mais tarde para verificar - - - Mounting HDFS folder on path {0} - Montando a pasta do HDFS no caminho {0} - - - Mount creation has started - A criação da montagem foi iniciada - - - OK - OK - - - Password - Senha - - - Refresh Mount - Atualizar Montagem - - - Refreshing HDFS Mount on path {0} - Atualizando a Montagem do HDFS no caminho {0} - - - Refresh mount request submitted - Solicitação de atualização de montagem enviada - - - Remember Password - Lembrar Senha - - - Authentication type - Tipo de autenticação - - - URL - URL - - - Username - Nome de usuário - - - - - - - Unexpected error loading saved controllers: {0} - Erro inesperado ao carregar os controladores salvos: {0} - - - - - - - Healthy - Íntegro - - - Unhealthy - Não íntegro - - - Application Proxy - Proxy de Aplicativo - - - Cluster Management Service - Serviço de Gerenciamento de Cluster - - - Gateway to access HDFS files, Spark - Gateway para acessar arquivos HDFS, Spark - - - Metrics Dashboard - Painel de Métricas - - - Log Search Dashboard - Painel de Pesquisa de Logs - - - Proxy for running Spark statements, jobs, applications - Proxy para a execução de instruções, trabalhos, aplicativos do Spark - - - Management Proxy - Proxy de Gerenciamento - - - Management Proxy - Proxy de Gerenciamento - - - Spark Jobs Management and Monitoring Dashboard - Painel de Gerenciamento e Monitoramento de Trabalhos do Spark - - - SQL Server Master Instance Front-End - Front-end da Instância Mestra do SQL Server - - - HDFS File System Proxy - Proxy do Sistema de Arquivos HDFS - - - Spark Diagnostics and Monitoring Dashboard - Painel de Monitoramento e Diagnóstico do Spark - - - Unexpected error retrieving BDC Endpoints: {0} - Erro inesperado ao recuperar os pontos de Extremidade do BDC: {0} - - - App - Aplicativo - - - Control - Controle - - - Gateway - Gateway - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - Aplicando Atualização - - - Applying Managed Upgrade - Aplicando Atualização Gerenciada - - - Creating - Criando - - - Creating Groups - Criando Grupos - - - Creating Kerberos Delegation Setup - Criando Configuração de Delegação do Kerberos - - - Creating Resources - Criando Recursos - - - Creating Secrets - Criando Segredos - - - Deleted - Excluído - - - Deleting - Excluindo - - - Error - Erro - - - Managed Upgrading - Atualização Gerenciada - - - Ready - Pronto - - - Rollback - Reverter - - - Rollback Complete - Reversão Concluída - - - Rollback In Progress - Reversão Em Andamento - - - Running - Executando - - - Upgrade Paused - Atualização em Pausa - - - Upgrading - Atualizando - - - Waiting - Aguardando - - - Waiting For Deletion - Aguardando Exclusão - - - Waiting For Groups - Aguardando Grupos - - - Waiting For Kerberos Delegation Setup - Aguardando Configuração de Delegação do Kerberos - - - Waiting For Resources - Esperando Recursos - - - Waiting For Secrets - Esperando Os Segredos - - - Waiting For Upgrade - Aguardando Upgrade - - - - - - - Big Data Cluster Dashboard - - Painel do Cluster de Big Data – - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - O complemento Cluster de Big Data está sendo desativado e a funcionalidade do Azure Data Studio para ele será removida em uma versão futura. Leia mais sobre isso e o suporte no futuro [here](https://go.microsoft.com/fwlink/?linkid=2207340). 
- - - Controller endpoint information was not found - As informações do ponto de extremidade do controlador não foram encontradas - - - Are you sure you want to remove '{0}'? - Tem certeza de que deseja remover '{0}'? - - - No - Não - - - Yes - Sim - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - Aceito {0}, {1} e {2}. - - - azdata License Terms - Termos de Licença do azdata - - - SQL Server License Terms - Termos de Licença do SQL Server - - - AKS cluster name - Nome do cluster do AKS - - - Region - Região - - - Resource group name - Nome do grupo de recursos - - - Azure settings - Configurações do Azure - - - Subscription id - ID da assinatura - - - Use my default Azure subscription - Usar minha assinatura padrão do Azure - - - VM count - Contagem de VMs - - - VM size - Tamanho da VM - - - Cluster name - Nome do cluster - - - SQL Server Big Data Cluster settings - Configurações do cluster de Big Data do SQL Server - - - Confirm password - Confirmar a senha - - - Controller username - Nome de usuário do controlador - - - Capacity for data (GB) - Capacidade de dados (GB) - - - Deployment target - Destino de implantação - - - Existing Azure Kubernetes Service Cluster - Cluster do Serviço de Kubernetes do Azure existente - - - Existing Azure Red Hat OpenShift cluster - Cluster do Red Hat OpenShift no Azure existente - - - Existing Kubernetes Cluster (kubeadm) - Cluster do Kubernetes existente (kubeadm) - - - Existing OpenShift cluster - Cluster do OpenShift existente - - - New Azure Kubernetes Service Cluster - Novo Cluster do Serviço de Kubernetes do Azure - - - Capacity for logs (GB) - Capacidade de logs (GB) - - - Password - Senha - - - Storage class name - Nome da classe de armazenamento - - - Big Data Cluster - Cluster de Big Data - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - Ignorar os erros de verificação do SSL em relação aos pontos de extremidade do Cluster de Big Data do SQL Server, como o HDFS, o Spark e o Controlador, se for true - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - Nenhum controlador do Cluster de Big Data do SQL registrado. [Saiba Mais](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Conectar Controlador](command:bigDataClusters.command.connectController) - - - Loading controllers... - Carregando controladores... 
- - - Connect to Existing Controller - Conectar-se ao Controlador Existente - - - Create New Controller - Criar Controlador - - - Delete Mount - Excluir Montagem - - - Manage - Gerenciar - - - Mount HDFS - Montar o HDFS - - - Refresh - Atualizar - - - Refresh Mount - Atualizar Montagem - - - Remove Controller - Remover Controlador - - - Support for managing SQL Server Big Data Clusters - Suporte para gerenciar clusters de Big Data do SQL Server - - - Microsoft Privacy Statement - Política de Privacidade da Microsoft - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - O Cluster de Big Data do SQL Server permite implantar clusters escalonáveis de contêineres do SQL Server, do Spark e do HDFS em execução no Kubernetes - - - SQL Server Big Data Cluster - Cluster de Big Data do SQL Server - - - SQL Server Big Data Clusters - Clusters de Big Data do SQL Server - - - Version - Versão - - - - \ No newline at end of file diff --git a/resources/xlf/ru/big-data-cluster.ru.xlf b/resources/xlf/ru/big-data-cluster.ru.xlf deleted file mode 100644 index ed33e6eb59..0000000000 --- a/resources/xlf/ru/big-data-cluster.ru.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - Ошибка при удалении подключения - - - Error retrieving BDC status from {0} - Ошибка при получении состояния модели подключения к бизнес-данным из {0} - - - Error retrieving cluster config from {0} - Ошибка при получении конфигурации кластера из {0} - - - Error retrieving endpoints from {0} - Ошибка при извлечении конечных точек из {0} - - - Error creating mount - Ошибка при создании подключения - - - Error refreshing mount - Ошибка при обновлении подключения - - - Error getting mount status - Ошибка при получении состояния подключения - - - Error during authentication - Ошибка при проверке подлинности - - - You do not have permission to log into this cluster using Windows Authentication - У вас нет разрешения на вход в этот кластер с использованием проверки подлинности Windows - - - This cluster does not support Windows authentication - Этот кластер не поддерживает проверку подлинности Windows - - - - - - - Add - Добавить - - - Add New Controller - Добавление нового контроллера - - - Basic - Базовое - - - Big Data Cluster overview - Обзор кластера больших данных - - - Cluster Details - Подробная информация о кластере - - - Cluster Overview - Обзор кластера - - - Cluster Properties - Свойства кластера - - - Cluster State - Состояние кластера - - - Copy - Копировать - - - Endpoint - Конечная точка - - - Health Status - Состояние работоспособности - - - Health Status Details - Сведения о состоянии работоспособности - - - Instance - Экземпляр - - - Last Updated : {0} - Последнее обновление: {0} - - - Loading cluster state completed - Загрузка состояния кластера завершена - - - Loading health status completed - Загрузка состояния работоспособности завершена - - - Logs - Журналы - - - Metrics and Logs - Метрики и журналы - - - The dashboard requires a connection. Please click retry to enter your credentials. - Необходимо указать параметры подключения для панели мониторинга. Нажмите "Повторить", чтобы ввести свои учетные данные. 
- - - Node Metrics - Метрики узла - - - N/A - Н/Д - - - Refresh - Обновить - - - Service - Служба - - - Service Endpoints - Конечные точки службы - - - Service Name - Имя службы - - - SQL Metrics - Метрики SQL - - - State - Состояние - - - Status Icon - Значок состояния - - - Troubleshoot - Устранение неполадок - - - Unexpected error occurred: {0} - Произошла неожиданная ошибка: {0} - - - View - Вид - - - View Details - Посмотреть подробную информацию - - - View Error Details - Просмотреть сведения об ошибке - - - View Kibana Logs {0} - Просмотреть журналы Kibana {0} - - - View Node Metrics {0} - Просмотреть метрики узла {0} - - - View SQL Metrics {0} - Просмотреть метрики SQL {0} - - - Cancel - Отмена - - - Cluster Management URL - URL-адрес управления кластером - - - Connect to Controller - Подключение к контроллеру - - - Endpoint '{0}' copied to clipboard - Конечная точка "{0}" скопирована в буфер обмена - - - Delete Mount - Удаление подключения - - - Deleting HDFS Mount on path {0} - Удаление подключения HDFS по пути {0} - - - Delete mount request submitted - Запрос на удаление подключения отправлен - - - Unexpected error retrieving BDC Endpoints: {0} - При получении конечных точек модели подключения к бизнес-данным возникла неожиданная ошибка: {0} - - - Password is required - Требуется пароль - - - Username is required - Требуется имя пользователя - - - Cluster Connection - Подключение к кластеру - - - Windows Authentication - Проверка подлинности Windows - - - Mount credentials for authentication to remote data source for reads - Учетные данные подключения для проверки подлинности для чтения удаленного источника данных - - - Credentials - Учетные данные - - - Mount HDFS Folder - Подключение папки HDFS - - - Bad formatting of credentials at {0} - Неправильное форматирование учетных данных в {0} - - - Unknown error occurred during the mount process - Неизвестная ошибка в процессе подключения - - - Login to controller failed - Не удалось войти на контроллер - - - Login to controller failed: {0} - Ошибка входа на контроллер: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - Путь к новому (несуществующему) каталогу, который вы хотите связать с точкой подключения - - - HDFS Path - Путь HDFS - - - Mount Configuration - Конфигурация подключения - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - URI удаленного источника данных. 
Например, для ADLS: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - Удаленный URI - - - Mounting HDFS folder is complete - Подключение папки HDFS выполнено - - - Error mounting folder: {0} - Ошибка при подключении папки: {0} - - - Mounting is likely to complete, check back later to verify - Подключение, скорее всего, завершено, проверьте это позднее - - - Mounting HDFS folder on path {0} - Подключение папки HDFS по пути {0} - - - Mount creation has started - Было запущено создание подключения - - - OK - OK - - - Password - Пароль - - - Refresh Mount - Обновление подключения - - - Refreshing HDFS Mount on path {0} - Обновление подключения HDFS по пути {0} - - - Refresh mount request submitted - Запрос на обновление подключения отправлен - - - Remember Password - Запомнить пароль - - - Authentication type - Тип проверки подлинности - - - URL - URL-адрес - - - Username - Имя пользователя - - - - - - - Unexpected error loading saved controllers: {0} - Непредвиденная ошибка при загрузке сохраненных контроллеров: {0} - - - - - - - Healthy - Исправна - - - Unhealthy - Неработоспособный - - - Application Proxy - Прокси приложения - - - Cluster Management Service - Служба управления кластерами - - - Gateway to access HDFS files, Spark - Шлюз для доступа к файлам HDFS, Spark - - - Metrics Dashboard - Панель мониторинга метрик - - - Log Search Dashboard - Панель мониторинга поиска по журналам - - - Proxy for running Spark statements, jobs, applications - Прокси-сервер для выполнения инструкций, заданий и приложений Spark - - - Management Proxy - Прокси-сервер управления - - - Management Proxy - Прокси-сервер управления - - - Spark Jobs Management and Monitoring Dashboard - Панель мониторинга для отслеживания заданий Spark и управления ими - - - SQL Server Master Instance Front-End - Интерфейс главного экземпляра SQL Server - - - HDFS File System Proxy - Прокси-сервер файловой системы HDFS - - - Spark Diagnostics and Monitoring Dashboard - Панель мониторинга для отслеживания и диагностики Spark - - - Unexpected error retrieving BDC Endpoints: {0} - При получении конечных точек модели подключения к бизнес-данным возникла неожиданная ошибка: {0} - - - App - Приложение - - - Control - Управление - - - Gateway - Шлюз - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - Применение обновления - - - Applying Managed Upgrade - Применение управляемого обновления - - - Creating - Создание - - - Creating Groups - Создание групп - - - Creating Kerberos Delegation Setup - Создание настройки делегирования Kerberos - - - Creating Resources - Создание ресурсов - - - Creating Secrets - Создание секретов - - - Deleted - Удалить - - - Deleting - Удаление - - - Error - Ошибка - - - Managed Upgrading - Управляемое обновление - - - Ready - Готово - - - Rollback - Откат - - - Rollback Complete - Откат завершен - - - Rollback In Progress - Выполняется откат - - - Running - Работает - - - Upgrade Paused - Обновление приостановлено - - - Upgrading - Идет обновление - - - Waiting - Ожидание - - - Waiting For Deletion - Ожидание удаления - - - Waiting For Groups - Ожидание групп - - - Waiting For Kerberos Delegation Setup - Ожидание настройки делегирования Kerberos - - - Waiting For Resources - Ожидание ресурсов - - - Waiting For Secrets - Ожидание секретов - - - Waiting For Upgrade - Ожидание обновления - - - - - - - Big Data Cluster Dashboard - - Панель мониторинга кластера больших данных — - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality 
for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - Поддержка надстройки кластера больших данных прекращается, соответствующая функциональность Azure Data Studio будет удалена в предстоящем выпуске. Дополнительные сведения об этом и о поддержке в будущем см. [здесь](https://go.microsoft.com/fwlink/?linkid=2207340). - - - Controller endpoint information was not found - Информация о конечных точках контроллера не найдена - - - Are you sure you want to remove '{0}'? - Вы действительно хотите удалить "{0}"? - - - No - Нет - - - Yes - Да - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - Я принимаю {0}, {1} и {2}. - - - azdata License Terms - Условия лицензии azdata - - - SQL Server License Terms - Условия лицензии SQL Server - - - AKS cluster name - Имя кластера AKS - - - Region - Регион - - - Resource group name - Имя группы ресурсов - - - Azure settings - Параметры Azure - - - Subscription id - Идентификатор подписки - - - Use my default Azure subscription - Использовать мою подписку Azure по умолчанию - - - VM count - Число виртуальных машин - - - VM size - Размер виртуальной машины - - - Cluster name - Имя кластера - - - SQL Server Big Data Cluster settings - Параметры кластера больших данных SQL Server - - - Confirm password - Подтверждение пароля - - - Controller username - Имя пользователя контроллера - - - Capacity for data (GB) - Емкость данных (ГБ) - - - Deployment target - Целевой объект развертывания - - - Existing Azure Kubernetes Service Cluster - Существующий кластер Службы Azure Kubernetes - - - Existing Azure Red Hat OpenShift cluster - Существующий кластер Azure Red Hat OpenShift - - - Existing Kubernetes Cluster (kubeadm) - Существующий кластер Kubernetes (kubeadm) - - - Existing OpenShift cluster - Существующий кластер OpenShift - - - New Azure Kubernetes Service Cluster - Новый кластер Службы Azure Kubernetes - - - Capacity for logs (GB) - Емкость для журналов (ГБ) - - - Password - Пароль - - - Storage class name - Имя класса хранения - - - Big Data Cluster - Кластер больших данных - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - Если этот параметр имеет значение TRUE, игнорировать ошибки проверки SSL в отношении конечных точек кластера больших данных SQL Server, таких как HDFS, Spark и Controller - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - Контроллеры кластера больших данных SQL не зарегистрированы. [Дополнительные сведения](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - - - Loading controllers... 
- Загрузка контроллеров… - - - Connect to Existing Controller - Подключение к существующему контроллеру - - - Create New Controller - Создание нового контроллера - - - Delete Mount - Удалить подключение - - - Manage - Управление - - - Mount HDFS - Подключение HDFS - - - Refresh - Обновить - - - Refresh Mount - Обновить подключение - - - Remove Controller - Удаление контроллера - - - Support for managing SQL Server Big Data Clusters - Поддержка управления кластерами больших данных SQL Server - - - Microsoft Privacy Statement - Заявление о конфиденциальности корпорации Майкрософт - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - Кластер больших данных SQL Server позволяет развертывать масштабируемые кластеры контейнеров SQL Server, Spark и HDFS, работающие на базе Kubernetes - - - SQL Server Big Data Cluster - Кластер больших данных SQL Server - - - SQL Server Big Data Clusters - Кластеры больших данных SQL Server - - - Version - Версия - - - - \ No newline at end of file diff --git a/resources/xlf/zh-hans/big-data-cluster.zh-Hans.xlf b/resources/xlf/zh-hans/big-data-cluster.zh-Hans.xlf deleted file mode 100644 index 2ceb7ab741..0000000000 --- a/resources/xlf/zh-hans/big-data-cluster.zh-Hans.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - 删除装载时出错 - - - Error retrieving BDC status from {0} - 从 {0} 检索 BDC 状态时出错 - - - Error retrieving cluster config from {0} - 从 {0} 检索群集配置时出错 - - - Error retrieving endpoints from {0} - 从 {0} 检索终结点时出错 - - - Error creating mount - 创建装载时出错 - - - Error refreshing mount - 刷新装载时出错 - - - Error getting mount status - 获取装载状态时出错 - - - Error during authentication - 身份验证期间出错 - - - You do not have permission to log into this cluster using Windows Authentication - 你没有使用 Windows 身份验证登录到此群集的权限 - - - This cluster does not support Windows authentication - 此群集不支持 Windows 身份验证 - - - - - - - Add - 添加 - - - Add New Controller - 添加新控制器 - - - Basic - 基本 - - - Big Data Cluster overview - 大数据群集概述 - - - Cluster Details - 群集详细信息 - - - Cluster Overview - 群集概述 - - - Cluster Properties - 群集属性 - - - Cluster State - 群集状态 - - - Copy - 复制 - - - Endpoint - 终结点 - - - Health Status - 运行状况 - - - Health Status Details - 运行状况详细信息 - - - Instance - 实例 - - - Last Updated : {0} - 上次更新时间: {0} - - - Loading cluster state completed - 群集状态加载已完成 - - - Loading health status completed - 运行状况加载已完成 - - - Logs - 日志 - - - Metrics and Logs - 指标和日志 - - - The dashboard requires a connection. Please click retry to enter your credentials. 
- 仪表板需要连接。请单击“重试”以输入凭据。 - - - Node Metrics - 节点指标 - - - N/A - 不适用 - - - Refresh - 刷新 - - - Service - 服务 - - - Service Endpoints - 服务终结点 - - - Service Name - 服务名称 - - - SQL Metrics - SQL 指标 - - - State - 状态 - - - Status Icon - 状态图标 - - - Troubleshoot - 疑难解答 - - - Unexpected error occurred: {0} - 发生意外错误: {0} - - - View - 查看 - - - View Details - 查看详细信息 - - - View Error Details - 查看错误详细信息 - - - View Kibana Logs {0} - 查看 Kibana 日志 {0} - - - View Node Metrics {0} - 查看节点指标 {0} - - - View SQL Metrics {0} - 查看 SQL 指标 {0} - - - Cancel - 取消 - - - Cluster Management URL - 群集管理 URL - - - Connect to Controller - 连接到控制器 - - - Endpoint '{0}' copied to clipboard - 终结点“{0}”已复制到剪贴板 - - - Delete Mount - 删除装载 - - - Deleting HDFS Mount on path {0} - 正在删除路径 {0} 上的 HDFS 装载 - - - Delete mount request submitted - 删除已提交的装载请求 - - - Unexpected error retrieving BDC Endpoints: {0} - 检索 BDC 终结点时出现意外错误: {0} - - - Password is required - 密码为必填项 - - - Username is required - 用户名为必填项 - - - Cluster Connection - 群集连接 - - - Windows Authentication - Windows 身份验证 - - - Mount credentials for authentication to remote data source for reads - 将身份验证凭据装载到远程数据源进行读取 - - - Credentials - 凭据 - - - Mount HDFS Folder - 装载 HDFS 文件夹 - - - Bad formatting of credentials at {0} - {0} 处凭据格式错误 - - - Unknown error occurred during the mount process - 装载过程中发生未知错误 - - - Login to controller failed - 未能登录到控制器 - - - Login to controller failed: {0} - 未能登录到控制器: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - 要与装载关联的新(不存在)目录的路径 - - - HDFS Path - HDFS 路径 - - - Mount Configuration - 装载配置 - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - 到远程数据源的 URI。ADLS 示例: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - 远程 URI - - - Mounting HDFS folder is complete - 已完成 HDFS 文件夹装载 - - - Error mounting folder: {0} - 装载文件夹时出错: {0} - - - Mounting is likely to complete, check back later to verify - 装载即将完成,请稍后回来查看以进行验证 - - - Mounting HDFS folder on path {0} - 正在路径 {0} 上装载 HDFS 文件夹 - - - Mount creation has started - 已开始装载创建 - - - OK - 确定 - - - Password - 密码 - - - Refresh Mount - 刷新装载 - - - Refreshing HDFS Mount on path {0} - 正在刷新路径 {0} 上的 HDFS 装载 - - - Refresh mount request submitted - 刷新已提交的装载请求 - - - Remember Password - 记住密码 - - - Authentication type - 身份验证类型 - - - URL - URL - - - Username - 用户名 - - - - - - - Unexpected error loading saved controllers: {0} - 加载已保存的控制器时出现意外错误: {0} - - - - - - - Healthy - 正常 - - - Unhealthy - 不正常 - - - Application Proxy - 应用程序代理 - - - Cluster Management Service - 群集管理服务 - - - Gateway to access HDFS files, Spark - 访问 HDFS 文件的网关,Spark - - - Metrics Dashboard - 指标仪表板 - - - Log Search Dashboard - 日志搜索仪表板 - - - Proxy for running Spark statements, jobs, applications - 用于运行 Spark 语句、作业和应用程序的代理 - - - Management Proxy - 管理代理 - - - Management Proxy - 管理代理 - - - Spark Jobs Management and Monitoring Dashboard - Spark 作业管理和监视仪表板 - - - SQL Server Master Instance Front-End - SQL Server 主实例前端 - - - HDFS File System Proxy - HDFS 文件系统代理 - - - Spark Diagnostics and Monitoring Dashboard - Spark 诊断和监视仪表板 - - - Unexpected error retrieving BDC Endpoints: {0} - 检索 BDC 终结点时出现意外错误: {0} - - - App - 应用 - - - Control - Control - - - Gateway - 网关 - - - HDFS - HDFS - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - 正在应用升级 - - - Applying Managed Upgrade - 正在应用托管升级 - - - Creating - 正在创建 - - - Creating Groups - 正在创建组 - - - Creating Kerberos Delegation Setup - 正在创建 Kerberos 委派设置 - - - Creating Resources - 正在创建资源 - - - Creating Secrets - 正在创建机密 - - - 
Deleted - 已删除 - - - Deleting - 正在删除 - - - Error - 错误 - - - Managed Upgrading - 正在托管升级 - - - Ready - 准备就绪 - - - Rollback - 回退 - - - Rollback Complete - 回退完成 - - - Rollback In Progress - 正在回退 - - - Running - 正在运行 - - - Upgrade Paused - 升级已暂停 - - - Upgrading - 正在升级 - - - Waiting - 正在等待 - - - Waiting For Deletion - 正在等待删除 - - - Waiting For Groups - 正在等待组 - - - Waiting For Kerberos Delegation Setup - 正在等待 Kerberos 委派设置 - - - Waiting For Resources - 正在等待资源 - - - Waiting For Secrets - 正在等待机密 - - - Waiting For Upgrade - 正在等待升级 - - - - - - - Big Data Cluster Dashboard - - 大数据群集仪表板 - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - 大数据群集加载项即将停用,将在即将发布的版本中删除其 Azure Data Studio 功能。在[此处](https://go.microsoft.com/fwlink/?linkid=2207340)阅读有关此事件和今后的支持的详细信息。 - - - Controller endpoint information was not found - 未找到控制器终结点信息 - - - Are you sure you want to remove '{0}'? - 确定要删除 "{0}" 吗? - - - No - - - - Yes - - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - 我接受 {0}、{1} 和 {2}。 - - - azdata License Terms - azdata 许可条款 - - - SQL Server License Terms - SQL Server 许可条款 - - - AKS cluster name - AKS 群集名称 - - - Region - 区域 - - - Resource group name - 资源组名称 - - - Azure settings - Azure 设置 - - - Subscription id - 订阅 ID - - - Use my default Azure subscription - 使用默认 Azure 订阅 - - - VM count - VM 计数 - - - VM size - VM 大小 - - - Cluster name - 群集名称 - - - SQL Server Big Data Cluster settings - SQL Server 大数据群集设置 - - - Confirm password - 确认密码 - - - Controller username - 控制器用户名 - - - Capacity for data (GB) - 数据容量(GB) - - - Deployment target - 部署目标 - - - Existing Azure Kubernetes Service Cluster - 现有 Azure Kubernetes 服务群集 - - - Existing Azure Red Hat OpenShift cluster - 现有 Azure Red Hat OpenShift 群集 - - - Existing Kubernetes Cluster (kubeadm) - 现有 Kubernetes 群集(kubeadm) - - - Existing OpenShift cluster - 现有 OpenShift 群集 - - - New Azure Kubernetes Service Cluster - 新的 Azure Kubernetes 服务群集 - - - Capacity for logs (GB) - 日志容量(GB) - - - Password - 密码 - - - Storage class name - 存储类名称 - - - Big Data Cluster - 大数据群集 - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - 如果为 true,则忽略针对 SQL Server 大数据群集终结点(如 HDFS、Spark 和控制器)的 SSL 验证错误 - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - 未注册任何 SQL 大数据群集控制器。[了解更多](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[连接控制器](command:bigDataClusters.command.connectController) - - - Loading controllers... - 正在加载控制器... 
- - - Connect to Existing Controller - 连接到现有控制器 - - - Create New Controller - 创建新控制器 - - - Delete Mount - 删除装载 - - - Manage - 管理 - - - Mount HDFS - 装载 HDFS - - - Refresh - 刷新 - - - Refresh Mount - 刷新装载 - - - Remove Controller - 移除控制器 - - - Support for managing SQL Server Big Data Clusters - 支持管理 SQL Server 大数据群集 - - - Microsoft Privacy Statement - Microsoft 隐私声明 - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - 借助 SQL Server 大数据群集,可部署在 Kubernetes 上运行的 SQL Server、Spark 和 HDFS 容器的可扩展群集 - - - SQL Server Big Data Cluster - SQL Server 大数据群集 - - - SQL Server Big Data Clusters - SQL Server 大数据群集 - - - Version - 版本 - - - - \ No newline at end of file diff --git a/resources/xlf/zh-hant/big-data-cluster.zh-Hant.xlf b/resources/xlf/zh-hant/big-data-cluster.zh-Hant.xlf deleted file mode 100644 index a225786e2e..0000000000 --- a/resources/xlf/zh-hant/big-data-cluster.zh-Hant.xlf +++ /dev/null @@ -1,753 +0,0 @@ - - - - - - Error deleting mount - 刪除裝載時發生錯誤 - - - Error retrieving BDC status from {0} - 從 {0} 擷取 BDC 狀態時發生錯誤 - - - Error retrieving cluster config from {0} - 從 {0} 擷取叢集組態時發生錯誤 - - - Error retrieving endpoints from {0} - 從 {0} 擷取端點時發生錯誤 - - - Error creating mount - 建立裝載時發生錯誤 - - - Error refreshing mount - 重新整理裝載時發生錯誤 - - - Error getting mount status - 取得裝載狀態時發生錯誤 - - - Error during authentication - 驗證期間發生錯誤 - - - You do not have permission to log into this cluster using Windows Authentication - 您無權使用 Windows 驗證登入此叢集 - - - This cluster does not support Windows authentication - 此叢集不支援 Windows 驗證 - - - - - - - Add - 新增 - - - Add New Controller - 新增控制器 - - - Basic - 基本 - - - Big Data Cluster overview - 巨量資料叢集概觀 - - - Cluster Details - 叢集詳細資料 - - - Cluster Overview - 叢集概觀 - - - Cluster Properties - 叢集屬性 - - - Cluster State - 叢集狀態 - - - Copy - 複製 - - - Endpoint - 端點 - - - Health Status - 健全狀態 - - - Health Status Details - 健全狀態詳細資料 - - - Instance - 執行個體 - - - Last Updated : {0} - 上次更新日期: {0} - - - Loading cluster state completed - 載入叢集狀態已完成 - - - Loading health status completed - 載入健全狀態已完成 - - - Logs - 記錄 - - - Metrics and Logs - 計量與記錄 - - - The dashboard requires a connection. Please click retry to enter your credentials. 
- 儀表板需要連線。請按一下 [重試],以輸入您的認證。 - - - Node Metrics - 節點計量 - - - N/A - N/A - - - Refresh - 重新整理 - - - Service - 服務 - - - Service Endpoints - 服務端點 - - - Service Name - 服務名稱 - - - SQL Metrics - SQL 計量 - - - State - 狀態 - - - Status Icon - 狀態圖示 - - - Troubleshoot - 疑難排解 - - - Unexpected error occurred: {0} - 發生未預期的錯誤: {0} - - - View - 檢視 - - - View Details - 檢視詳細資料 - - - View Error Details - 檢視錯誤詳細資料 - - - View Kibana Logs {0} - 檢視 Kibana 記錄 {0} - - - View Node Metrics {0} - 檢視節點計量 {0} - - - View SQL Metrics {0} - 檢視 SQL 計量 {0} - - - Cancel - 取消 - - - Cluster Management URL - 叢集管理 URL - - - Connect to Controller - 連線至控制器 - - - Endpoint '{0}' copied to clipboard - 已將端點 "{0}" 複製到剪貼簿 - - - Delete Mount - 刪除裝載 - - - Deleting HDFS Mount on path {0} - 正在刪除路徑 {0} 上的 HDFS 裝載 - - - Delete mount request submitted - 已提交刪除裝載要求 - - - Unexpected error retrieving BDC Endpoints: {0} - 擷取 BDC 端點時,發生未預期的的錯誤: {0} - - - Password is required - 需要密碼 - - - Username is required - 需要使用者名稱 - - - Cluster Connection - 叢集連線 - - - Windows Authentication - Windows 驗證 - - - Mount credentials for authentication to remote data source for reads - 將驗證用認證掛接至遠端資料來源以供讀取 - - - Credentials - 認證 - - - Mount HDFS Folder - 裝載 HDFS 資料夾 - - - Bad formatting of credentials at {0} - 位於 {0} 的認證格式錯誤 - - - Unknown error occurred during the mount process - 裝載過程中發生未知錯誤 - - - Login to controller failed - 無法登入控制器 - - - Login to controller failed: {0} - 無法登入控制器: {0} - - - Path to a new (non-existing) directory which you want to associate with the mount - 要連結到掛接之新 (不存在) 目錄的路徑 - - - HDFS Path - HDFS 路徑 - - - Mount Configuration - 裝載組態 - - - The URI to the remote data source. Example for ADLS: abfs://fs@saccount.dfs.core.windows.net/ - 遠端資料來源的 URI。ADLS 範例: abfs://fs@saccount.dfs.core.windows.net/ - - - Remote URI - 遠端 URI - - - Mounting HDFS folder is complete - 裝載 HDFS 資料夾已完成 - - - Error mounting folder: {0} - 裝載資料夾時發生錯誤: {0} - - - Mounting is likely to complete, check back later to verify - 裝載即將完成,請稍後再返回確認 - - - Mounting HDFS folder on path {0} - 正在路徑 {0} 上裝載 HDFS 資料夾 - - - Mount creation has started - 已啟動裝載建立 - - - OK - 確定 - - - Password - 密碼 - - - Refresh Mount - 重新整理裝載 - - - Refreshing HDFS Mount on path {0} - 正在重新整理路徑 {0} 上的 HDFS 裝載 - - - Refresh mount request submitted - 已提交重新整理裝載要求 - - - Remember Password - 記住密碼 - - - Authentication type - 驗證類型 - - - URL - URL - - - Username - 使用者名稱 - - - - - - - Unexpected error loading saved controllers: {0} - 載入儲存的控制器時發生未預期錯誤: {0} - - - - - - - Healthy - 狀況良好 - - - Unhealthy - 狀況不良 - - - Application Proxy - 應用程式 Proxy - - - Cluster Management Service - 叢集管理服務 - - - Gateway to access HDFS files, Spark - 用來存取 HDFS 檔案的閘道,Spark - - - Metrics Dashboard - 計量儀表板 - - - Log Search Dashboard - 記錄搜尋儀表板 - - - Proxy for running Spark statements, jobs, applications - 用來執行 Spark 陳述式、作業、應用程式的 Proxy - - - Management Proxy - 管理 Proxy - - - Management Proxy - 管理 Proxy - - - Spark Jobs Management and Monitoring Dashboard - Spark 作業管理與監視儀表板 - - - SQL Server Master Instance Front-End - SQL Server 主要執行個體前端 - - - HDFS File System Proxy - HDFS 檔案系統 Proxy - - - Spark Diagnostics and Monitoring Dashboard - Spark 診斷與監視儀表板 - - - Unexpected error retrieving BDC Endpoints: {0} - 擷取 BDC 端點時,發生未預期的的錯誤: {0} - - - App - 應用程式 - - - Control - Control - - - Gateway - 閘道 - - - HDFS - Hdfs - - - Spark - Spark - - - SQL Server - SQL Server - - - Applying Upgrade - 正在套用升級 - - - Applying Managed Upgrade - 正在套用受控升級 - - - Creating - 正在建立 - - - Creating Groups - 正在建立群組 - - - Creating Kerberos Delegation Setup - 正在建立 Kerberos 委派設定 - - - Creating Resources - 
正在建立資源 - - - Creating Secrets - 正在建立祕密 - - - Deleted - 已刪除 - - - Deleting - 正在刪除 - - - Error - 錯誤 - - - Managed Upgrading - 受控升級 - - - Ready - 就緒 - - - Rollback - 復原 - - - Rollback Complete - 復原完成 - - - Rollback In Progress - 正在復原 - - - Running - 正在執行 - - - Upgrade Paused - 升級已暫停 - - - Upgrading - 正在升級 - - - Waiting - 正在等候 - - - Waiting For Deletion - 正在等待刪除 - - - Waiting For Groups - 正在等待群組 - - - Waiting For Kerberos Delegation Setup - 正在等待 Kerberos 委派設定 - - - Waiting For Resources - 正在等待資源 - - - Waiting For Secrets - 正在等待秘密 - - - Waiting For Upgrade - 正在等待升級 - - - - - - - Big Data Cluster Dashboard - - 巨量資料叢集儀表板 - - - - The Big Data Cluster add-on is being retired and Azure Data Studio functionality for it will be removed in an upcoming release. Read more about this and support going forward [here](https://go.microsoft.com/fwlink/?linkid=2207340). - 即將淘汰 Big Data Cluster 附加元件,而其 Azure Data Studio 功能將在即將推出的版本中移除。在[這裡](https://go.microsoft.com/fwlink/?linkid=2207340)閱讀更多有關此資訊以及往後的支援。 - - - Controller endpoint information was not found - 找不到控制器端點資訊 - - - Are you sure you want to remove '{0}'? - 您確定要移除 '{0}' 嗎? - - - No - - - - Yes - - - - - - - - SQL Server 2019 - SQL Server 2019 - - - I accept {0}, {1} and {2}. - 我接受 {0}、{1} 和 {2}。 - - - azdata License Terms - azdata 授權條款 - - - SQL Server License Terms - SQL Server 授權條款 - - - AKS cluster name - AKS 叢集名稱 - - - Region - 區域 - - - Resource group name - 資源群組名稱 - - - Azure settings - Azure 設定 - - - Subscription id - 訂用帳戶識別碼 - - - Use my default Azure subscription - 使用我的預設 Azure 訂用帳戶 - - - VM count - VM 計數 - - - VM size - VM 大小 - - - Cluster name - 叢集名稱 - - - SQL Server Big Data Cluster settings - SQL Server 巨量資料叢集設定 - - - Confirm password - 確認密碼 - - - Controller username - 控制器使用者名稱 - - - Capacity for data (GB) - 資料的容量 (GB) - - - Deployment target - 部署目標 - - - Existing Azure Kubernetes Service Cluster - 現有的 Azure Kubernetes 服務叢集 - - - Existing Azure Red Hat OpenShift cluster - 現有的 Azure Red Hat OpenShift 叢集 - - - Existing Kubernetes Cluster (kubeadm) - 現有的 Kubernetes 叢集 (kubeadm) - - - Existing OpenShift cluster - 現有的 OpenShift 叢集 - - - New Azure Kubernetes Service Cluster - 新增 Azure Kubernetes Service 叢集 - - - Capacity for logs (GB) - 記錄的容量 (GB) - - - Password - 密碼 - - - Storage class name - 儲存類別名稱 - - - Big Data Cluster - 巨量資料叢集 - - - Ignore SSL verification errors against SQL Server Big Data Cluster endpoints such as HDFS, Spark, and Controller if true - 若為 True,則忽略對 SQL Server 巨量資料叢集端點 (例如 HDFS、Spark 及控制器) 所產生的 SSL 驗證錯誤 - - - No SQL Big Data Cluster controllers registered. [Learn More](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[Connect Controller](command:bigDataClusters.command.connectController) - 未註冊任何 SQL 巨量資料叢集控制器。[深入了解](https://docs.microsoft.com/sql/big-data-cluster/big-data-cluster-overview) -[連線控制器](command:bigDataClusters.command.connectController) - - - Loading controllers... - 正在載入控制器... 
- - - Connect to Existing Controller - 連線至現有的控制器 - - - Create New Controller - 建立新控制器 - - - Delete Mount - 刪除裝載 - - - Manage - 管理 - - - Mount HDFS - 裝載 HDFS - - - Refresh - 重新整理 - - - Refresh Mount - 重新整理裝載 - - - Remove Controller - 移除控制器 - - - Support for managing SQL Server Big Data Clusters - 管理 SQL Server 巨量資料叢集的支援 - - - Microsoft Privacy Statement - Microsoft 隱私權聲明 - - - SQL Server Big Data Cluster allows you to deploy scalable clusters of SQL Server, Spark, and HDFS containers running on Kubernetes - SQL Server 巨量資料叢集,可讓您部署於 Kubernetes 上執行且可調整的 SQL Server、Spark 和 HDFS 容器叢集 - - - SQL Server Big Data Cluster - SQL Server 巨量資料叢集 - - - SQL Server Big Data Clusters - SQL Server 巨量資料叢集 - - - Version - 版本 - - - - \ No newline at end of file diff --git a/samples/notebookSamples/SampleTSQLNotebook.ipynb b/samples/notebookSamples/SampleTSQLNotebook.ipynb index 1ec76521fd..e96e498855 100644 --- a/samples/notebookSamples/SampleTSQLNotebook.ipynb +++ b/samples/notebookSamples/SampleTSQLNotebook.ipynb @@ -1,82 +1,75 @@ { - "metadata": { - "kernelspec": { - "name": "SQL", - "display_name": "SQL", - "language": "sql" - }, - "language_info": { - "name": "sql", - "version": "" - } - }, - "nbformat_minor": 2, - "nbformat": 4, "cells": [ { "cell_type": "markdown", - "source": "\r\n\r\n## SQL Notebooks in Azure Data Studio\r\n\r\nNotebooks allow rich text, images, code, and resultsets to be easily shared. This is a concept that is widely used in data science and which we feel is well suited to SQL work. \r\n", - "metadata": {} + "metadata": {}, + "source": [ + "\n", + "\n", + "## SQL Notebooks in Azure Data Studio\n", + "\n", + "Notebooks allow rich text, images, code, and resultsets to be easily shared. This is a concept that is widely used in data science and which we feel is well suited to SQL work. \n" + ] }, { "cell_type": "code", - "source": "select \r\n 'Hello SQL World' as [Greetings], \r\n @@servername as [Server Name],\r\n datename(weekday,getutcdate()) as [Today]", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "select \n", + " 'Hello SQL World' as [Greetings], \n", + " @@servername as [Server Name],\n", + " datename(weekday,getutcdate()) as [Today]" + ] }, { "cell_type": "markdown", - "source": "### Concepts\r\n\r\nNotebooks are saved in a file format of .ipynb and have a couple of logical components.\r\n\r\n**Kernel**\r\n\r\nThe language and execution environment of the notebook. Common examples are Python, R, Scala, and Spark. Azure Data Studio also offers a SQL kernel, which is the focus of this tutorial.\r\n\r\n**Attach To**\r\n\r\nThis is the compute environment for the code - basically, where it will run. For SQL this is exactly analogous to the familiar Connection property of a query.\r\n\r\n**Cell**\r\n\r\nA cell is an editable section of the notebook. Cells can be human-readable text or code. Text cells are edited in the Markdown language to allow formatting and can include rich content including images. In Azure Data Studio, code cells include intellisense where possible. Below is an example of a SQL code cell.\r\n", - "metadata": {} + "metadata": {}, + "source": [ + "### Concepts\n", + "\n", + "Notebooks are saved in a file format of .ipynb and have a couple of logical components.\n", + "\n", + "**Kernel**\n", + "\n", + "The language and execution environment of the notebook. Common examples are Python and PowerShell. 
Azure Data Studio also offers a SQL kernel, which is the focus of this tutorial.\n", + "\n", + "**Attach To**\n", + "\n", + "This is the compute environment for the code - basically, where it will run. For SQL this is exactly analogous to the familiar Connection property of a query.\n", + "\n", + "**Cell**\n", + "\n", + "A cell is an editable section of the notebook. Cells can be human-readable text or code. Text cells are edited in the Markdown language to allow formatting and can include rich content including images. In Azure Data Studio, code cells include intellisense where possible. Below is an example of a SQL code cell.\n" + ] }, { "cell_type": "code", - "source": "select top 5 * from sys.dm_exec_session_wait_stats order by wait_time_ms desc", + "execution_count": 7, "metadata": {}, "outputs": [ { - "output_type": "display_data", "data": { - "text/html": "(5 rows affected)" + "text/html": [ + "(5 rows affected)" + ] }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/html": "Total execution time: 00:00:00.163" - }, - "metadata": {} - }, - { - "output_type": "execute_result", "metadata": {}, - "execution_count": 7, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Total execution time: 00:00:00.163" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { "data": { "application/vnd.dataresource+json": { - "schema": { - "fields": [ - { - "name": "session_id" - }, - { - "name": "wait_type" - }, - { - "name": "waiting_tasks_count" - }, - { - "name": "wait_time_ms" - }, - { - "name": "max_wait_time_ms" - }, - { - "name": "signal_wait_time_ms" - } - ] - }, "data": [ { "0": "59", @@ -118,143 +111,300 @@ "4": "0", "5": "0" } - ] + ], + "schema": { + "fields": [ + { + "name": "session_id" + }, + { + "name": "wait_type" + }, + { + "name": "waiting_tasks_count" + }, + { + "name": "wait_time_ms" + }, + { + "name": "max_wait_time_ms" + }, + { + "name": "signal_wait_time_ms" + } + ] + } }, - "text/html": "
session_id | wait_type | waiting_tasks_count / wait_time_ms / max_wait_time_ms / signal_wait_time_ms
59 | ASYNC_NETWORK_IO | 243893470
57 | PREEMPTIVE_XE_GETTARGETSTATE | 91601020
57 | ASYNC_NETWORK_IO | 32338210
58 | MEMORY_ALLOCATION_EXT | 50662500
60 | PAGEIOLATCH_SH | 301100
" - } + "text/html": [ + "
session_id | wait_type | waiting_tasks_count / wait_time_ms / max_wait_time_ms / signal_wait_time_ms
59 | ASYNC_NETWORK_IO | 243893470
57 | PREEMPTIVE_XE_GETTARGETSTATE | 91601020
57 | ASYNC_NETWORK_IO | 32338210
58 | MEMORY_ALLOCATION_EXT | 50662500
60 | PAGEIOLATCH_SH | 301100
" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" } ], - "execution_count": 7 + "source": [ + "select top 5 * from sys.dm_exec_session_wait_stats order by wait_time_ms desc" + ] }, { "cell_type": "markdown", - "source": "**Results**\r\n\r\nCode cells can be executed against the environment specified in Attach To, and their results are saved into the notebook. Once executed and saved, the results can be opened with the file without having be be re-executed. Results may be text, grids, charts, or other visualizations.\r\n\r\n**Trusted vs Non-Trusted**\r\n\r\nSince notebooks can contain HTML and Javascript code, it would be possible for a maliciously formed notebook to execute code simply upon being opened by the user. To prevent this, notebooks have the concept of \"Trusted\" and \"Untrusted.\" Untrusted HTML is sanitized and untrusted Javascript is not executed. Users can mark notebooks trusted if necessary, but in common use cases an untrusted notebook will perform the same as a trusted one. This is similar to the behavior of many other file types when downloaded from the internet.", - "metadata": {} + "metadata": {}, + "source": [ + "**Results**\n", + "\n", + "Code cells can be executed against the environment specified in Attach To, and their results are saved into the notebook. Once executed and saved, the results can be opened with the file without having be be re-executed. Results may be text, grids, charts, or other visualizations.\n", + "\n", + "**Trusted vs Non-Trusted**\n", + "\n", + "Since notebooks can contain HTML and Javascript code, it would be possible for a maliciously formed notebook to execute code simply upon being opened by the user. To prevent this, notebooks have the concept of \"Trusted\" and \"Untrusted.\" Untrusted HTML is sanitized and untrusted Javascript is not executed. Users can mark notebooks trusted if necessary, but in common use cases an untrusted notebook will perform the same as a trusted one. This is similar to the behavior of many other file types when downloaded from the internet." + ] }, { "cell_type": "markdown", - "source": "### Editing Experience\r\n\r\nEditing SQL is the same in Azure Data Studio notebooks as in the Azure Data Studio query editor - the same Intellisense, the same snippet support. Markdown is edited with a custom editor which shows a preview during edit, and only shows the processed markdown when not in edit mode. Code and markdown (text) cells can be added with buttons or through context menus.", - "metadata": {} + "metadata": {}, + "source": [ + "### Editing Experience\n", + "\n", + "Editing SQL is the same in Azure Data Studio notebooks as in the Azure Data Studio query editor - the same Intellisense, the same snippet support. Markdown is edited with a custom editor which shows a preview during edit, and only shows the processed markdown when not in edit mode. Code and markdown (text) cells can be added with buttons or through context menus." + ] }, { "cell_type": "code", - "source": "select top 10 * from sys.databases", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "select top 10 * from sys.databases" + ] }, { "cell_type": "markdown", - "source": "### Code Separation and Context\r\n\r\nThe separate code cells in a SQL notebook are run as separate batches. This is equivalent to having a GO statement between cells.\r\n\r\nThis is an area we're actively innovating in and want to make a little more straightforward! 
But we'll describe the current behavior.\r\n\r\nIn this code, we're setting the database context to WideWorldImporters and then creating a local variable. We're also setting nocount on, to demonstrate set statement behavior.", - "metadata": {} + "metadata": {}, + "source": [ + "### Code Separation and Context\n", + "\n", + "The separate code cells in a SQL notebook are run as separate batches. This is equivalent to having a GO statement between cells.\n", + "\n", + "This is an area we're actively innovating in and want to make a little more straightforward! But we'll describe the current behavior.\n", + "\n", + "In this code, we're setting the database context to WideWorldImporters and then creating a local variable. We're also setting nocount on, to demonstrate set statement behavior." + ] }, { "cell_type": "code", - "source": "set nocount on;\r\nuse WideWorldImporters;\r\ndeclare @i int;\r\nset @i = 1;\r\nselect @i [Value of @i], db_name() as [MyDatabaseName]", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "set nocount on;\n", + "use WideWorldImporters;\n", + "declare @i int;\n", + "set @i = 1;\n", + "select @i [Value of @i], db_name() as [MyDatabaseName]" + ] }, { "cell_type": "markdown", - "source": "The next cell will show an error, since the variable @i is not declared within this cell.", - "metadata": {} + "metadata": {}, + "source": [ + "The next cell will show an error, since the variable @i is not declared within this cell." + ] }, { "cell_type": "code", - "source": "select @i [Value of @i], db_name() as [MyDatabaseName]", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "select @i [Value of @i], db_name() as [MyDatabaseName]" + ] }, { "cell_type": "markdown", - "source": "In the next cell, we're re-declaring @i, but not setting it, so the value remains null.\r\n\r\nHowever, the previously set database context and nocount settings are maintained as these are durable throughout the session. This is the same behavior as the query editor.", - "metadata": {} + "metadata": {}, + "source": [ + "In the next cell, we're re-declaring @i, but not setting it, so the value remains null.\n", + "\n", + "However, the previously set database context and nocount settings are maintained as these are durable throughout the session. This is the same behavior as the query editor." + ] }, { "cell_type": "code", - "source": "declare @i int;\r\nselect @i [Value of @i], db_name() as [MyDatabaseName]", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "declare @i int;\n", + "select @i [Value of @i], db_name() as [MyDatabaseName]" + ] }, { "cell_type": "markdown", - "source": "One upshot of this - not unique to SQL notebooks - is that code cell execution order *does matter*\r\n\r\nWe think this is an area we can innovate in and welcome your suggestions on how to make this easier to work with in a notebook context.", - "metadata": {} + "metadata": {}, + "source": [ + "One upshot of this - not unique to SQL notebooks - is that code cell execution order *does matter*\n", + "\n", + "We think this is an area we can innovate in and welcome your suggestions on how to make this easier to work with in a notebook context." + ] }, { "cell_type": "markdown", - "source": "### Multi-language support\r\n\r\nWhile in a SQL notebook you can change to run R and Python on the SQL Server with a special syntax. 
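What the magic saves you from writing by hand is the sp_execute_external_script wrapper. A minimal equivalent of the Hello World cell shown further below - a sketch, assuming 'external scripts enabled' has been turned on via sp_configure on the target server:

    -- Hand-written equivalent of a %%lang_python cell
    EXEC sp_execute_external_script
        @language = N'Python',
        @script = N'print("Hello World")';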
This allows you to type your raw Python or R and execute it without having to remember how to wrap it up in a stored procedure (provided that the Advanced Analytics Extensions feature is installed and enabled on the target server)", - "metadata": {} + "metadata": {}, + "source": [ + "### Multi-language support\n", + "\n", + "While in a SQL notebook you can change to run R and Python on the SQL Server with a special syntax. This allows you to type your raw Python or R and execute it without having to remember how to wrap it up in a stored procedure (provided that the Advanced Analytics Extensions feature is installed and enabled on the target server)" + ] }, { "cell_type": "code", - "source": "%%lang_python\r\nprint (\"Hello World\")", + "execution_count": 1, "metadata": { "language": "python" }, "outputs": [ { - "output_type": "display_data", "data": { - "text/html": "STDOUT message(s) from external script: \nHello World\r" + "text/html": [ + "STDOUT message(s) from external script: \n", + "Hello World\r" + ] }, - "metadata": {} + "metadata": {}, + "output_type": "display_data" }, { - "output_type": "display_data", "data": { - "text/html": "Total execution time: 00:00:21.211" + "text/html": [ + "Total execution time: 00:00:21.211" + ] }, - "metadata": {} + "metadata": {}, + "output_type": "display_data" } ], - "execution_count": 1 + "source": [ + "%%lang_python\n", + "print (\"Hello World\")" + ] }, { "cell_type": "markdown", - "source": "### Use Cases\r\n\r\nWe're envisioning a number of use cases for the SQL notebooks. In many ways it's simply the next generation experience for the query editor, and many of the things you can do in a plain editor can be done as well or better in a notebook. But a few scenarios have jumped out at us:\r\n\r\n- Teaching Tools \r\n- Runbooks\r\n- Business reporting and presentations\r\n- Troubleshooting\r\n- Deployments\r\n- Baselining\r\n\r\nWe're excited to learn what the community can do with them!", - "metadata": {} + "metadata": {}, + "source": [ + "### Use Cases\n", + "\n", + "We're envisioning a number of use cases for the SQL notebooks. In many ways it's simply the next generation experience for the query editor, and many of the things you can do in a plain editor can be done as well or better in a notebook. But a few scenarios have jumped out at us:\n", + "\n", + "- Teaching Tools \n", + "- Runbooks\n", + "- Business reporting and presentations\n", + "- Troubleshooting\n", + "- Deployments\n", + "- Baselining\n", + "\n", + "We're excited to learn what the community can do with them!" + ] }, { "cell_type": "markdown", - "source": "**Example Server Information Report**\r\n\r\nRun this report and save it to a known location to maintain a basic point in time inventory.", - "metadata": {} + "metadata": {}, + "source": [ + "**Example Server Information Report**\n", + "\n", + "Run this report and save it to a known location to maintain a basic point in time inventory." 
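The report cell itself follows. If the point-in-time inventory should also capture storage, one optional extension is an extra file-size query - a sketch, not part of the saved notebook, relying on sys.master_files reporting size in 8 KB pages:

    -- Optional additional result set for the inventory: per-database file sizes
    SELECT db_name(database_id) AS DatabaseName,
           name AS LogicalFileName,
           type_desc AS FileType,
           size * 8 / 1024 AS SizeMB
    FROM sys.master_files
    ORDER BY DatabaseName, FileType;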
+ ] }, { "cell_type": "code", - "source": "--Server basics\r\nSELECT\r\n getutcdate() as DateRun, \r\n db_name() as DatabaseName,\r\n serverproperty('ServerName') as ServerName,\r\n serverproperty('InstanceName') as InstanceName,\r\n serverproperty('ComputerNamePhysicalNetBIOS') as PhysicalName,\r\n serverproperty('Edition') as Edition,\r\n serverproperty('ProductMajorVersion') as MajorVersion,\r\n serverproperty('ProductMinorVersion') as MinorVersion\r\n\r\n--Databases\r\nSELECT *\r\nfrom sys.databases", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "--Server basics\n", + "SELECT\n", + " getutcdate() as DateRun, \n", + " db_name() as DatabaseName,\n", + " serverproperty('ServerName') as ServerName,\n", + " serverproperty('InstanceName') as InstanceName,\n", + " serverproperty('ComputerNamePhysicalNetBIOS') as PhysicalName,\n", + " serverproperty('Edition') as Edition,\n", + " serverproperty('ProductMajorVersion') as MajorVersion,\n", + " serverproperty('ProductMinorVersion') as MinorVersion\n", + "\n", + "--Databases\n", + "SELECT *\n", + "from sys.databases" + ] }, { "cell_type": "markdown", - "source": "**More SQL Examples**\r\n\r\n", - "metadata": {} + "metadata": {}, + "source": [ + "**More SQL Examples**\n", + "\n" + ] }, { "cell_type": "code", - "source": "DECLARE @i INT, @c varchar(26);\r\nSELECT @i = 65, @c = '';\r\nWHILE (@i < 93)\r\nBEGIN\r\n SELECT @c = concat(@c,CHAR(@i))\r\n SET @i = @i + 1\r\nEND\r\n\r\n\r\nSELECT @c as \"Letters\", len(@c) as \"Count\"", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "DECLARE @i INT, @c varchar(26);\n", + "SELECT @i = 65, @c = '';\n", + "WHILE (@i < 93)\n", + "BEGIN\n", + " SELECT @c = concat(@c,CHAR(@i))\n", + " SET @i = @i + 1\n", + "END\n", + "\n", + "\n", + "SELECT @c as \"Letters\", len(@c) as \"Count\"" + ] }, { "cell_type": "code", - "source": "DROP TABLE IF EXISTS [dbo].[MyNotebookTable]\r\nCREATE TABLE [dbo].[MyNotebookTable]\r\n(\r\n [Id] INT IDENTITY NOT NULL PRIMARY KEY, -- Primary Key column\r\n [FirstValue] NVARCHAR(50) NOT NULL,\r\n [SecondValue] NVARCHAR(50) NOT NULL\r\n);\r\n\r\nPRINT 'Success: Created MyNotebookTable'\r\n", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "DROP TABLE IF EXISTS [dbo].[MyNotebookTable]\n", + "CREATE TABLE [dbo].[MyNotebookTable]\n", + "(\n", + " [Id] INT IDENTITY NOT NULL PRIMARY KEY, -- Primary Key column\n", + " [FirstValue] NVARCHAR(50) NOT NULL,\n", + " [SecondValue] NVARCHAR(50) NOT NULL\n", + ");\n", + "\n", + "PRINT 'Success: Created MyNotebookTable'\n" + ] }, { "cell_type": "code", - "source": "raiserror('Something bad happened!',0,1) with nowait;\r\nwaitfor delay '00:00:05'\r\nraiserror('Something bad happened... again!',0,1) with nowait;", + "execution_count": 1, "metadata": {}, "outputs": [], - "execution_count": 1 + "source": [ + "raiserror('Something bad happened!',0,1) with nowait;\n", + "waitfor delay '00:00:05'\n", + "raiserror('Something bad happened... 
again!',0,1) with nowait;" + ] } - ] + ], + "metadata": { + "kernelspec": { + "display_name": "SQL", + "language": "sql", + "name": "SQL" + }, + "language_info": { + "name": "sql", + "version": "" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/scripts/sql-test-integration.sh b/scripts/sql-test-integration.sh index 919101472a..00b3890bc0 100755 --- a/scripts/sql-test-integration.sh +++ b/scripts/sql-test-integration.sh @@ -43,7 +43,6 @@ fi --extensionDevelopmentPath=$ROOT/extensions/admin-tool-ext-win \ --extensionDevelopmentPath=$ROOT/extensions/agent \ --extensionDevelopmentPath=$ROOT/extensions/azurecore \ ---extensionDevelopmentPath=$ROOT/extensions/big-data-cluster \ --extensionDevelopmentPath=$ROOT/extensions/cms \ --extensionDevelopmentPath=$ROOT/extensions/dacpac \ --extensionDevelopmentPath=$ROOT/extensions/import \ diff --git a/src/sql/platform/telemetry/common/adsTelemetryService.ts b/src/sql/platform/telemetry/common/adsTelemetryService.ts index 6b894bd196..a4a66af5b9 100644 --- a/src/sql/platform/telemetry/common/adsTelemetryService.ts +++ b/src/sql/platform/telemetry/common/adsTelemetryService.ts @@ -60,7 +60,6 @@ class TelemetryEventImpl implements ITelemetryEvent { serverVersion: serverInfo?.serverVersion ?? '', serverEdition: serverInfo?.serverEdition ?? '', serverEngineEdition: serverInfo?.engineEditionId ?? '', - isBigDataCluster: serverInfo?.options?.isBigDataCluster ?? false, }); return this; } diff --git a/src/sql/workbench/contrib/dashboard/browser/pages/databaseDashboardPage.contribution.ts b/src/sql/workbench/contrib/dashboard/browser/pages/databaseDashboardPage.contribution.ts index c41f412d1b..38623a1517 100644 --- a/src/sql/workbench/contrib/dashboard/browser/pages/databaseDashboardPage.contribution.ts +++ b/src/sql/workbench/contrib/dashboard/browser/pages/databaseDashboardPage.contribution.ts @@ -97,7 +97,6 @@ export const databaseDashboardSettingSchema: IJSONSchema = { widget: { 'tasks-widget': [ 'newQuery', - 'mssqlCluster.task.newNotebook', { name: 'backup', when: 'connectionProvider == \'MSSQL\' && !mssql:iscloud && mssql:engineedition != 11 || connectionProvider == \'PGSQL\'' }, { name: 'restore', when: 'connectionProvider == \'MSSQL\' && !mssql:iscloud && mssql:engineedition != 11 || connectionProvider == \'PGSQL\'' } ] diff --git a/src/sql/workbench/contrib/dashboard/browser/pages/serverDashboardPage.contribution.ts b/src/sql/workbench/contrib/dashboard/browser/pages/serverDashboardPage.contribution.ts index 1c6e8c5035..c8d732866f 100644 --- a/src/sql/workbench/contrib/dashboard/browser/pages/serverDashboardPage.contribution.ts +++ b/src/sql/workbench/contrib/dashboard/browser/pages/serverDashboardPage.contribution.ts @@ -79,7 +79,6 @@ const defaultVal = [ widget: { 'tasks-widget': [ 'newQuery', - 'mssqlCluster.task.newNotebook', { name: 'restore', when: 'connectionProvider == \'MSSQL\' && !mssql:iscloud && mssql:engineedition != 11 || connectionProvider == \'PGSQL\'' }] }, gridItemConfig: { diff --git a/src/sql/workbench/contrib/notebook/browser/notebookActions.ts b/src/sql/workbench/contrib/notebook/browser/notebookActions.ts index 731461cd6f..701ab0374b 100644 --- a/src/sql/workbench/contrib/notebook/browser/notebookActions.ts +++ b/src/sql/workbench/contrib/notebook/browser/notebookActions.ts @@ -464,7 +464,7 @@ export class RunParametersAction extends TooltipFromLabelAction { return; } const editor = this._notebookService.findNotebookEditor(context); - // Only run action for kernels that are supported (Python, PySpark, PowerShell) + 
// Only run action for kernels that are supported (Python, PowerShell) let supportedKernels: string[] = [KernelsLanguage.Python, KernelsLanguage.PowerShell]; if (!supportedKernels.includes(editor.model.languageInfo.name)) { // If the kernel is not supported indicate to user to use supported kernels diff --git a/src/sql/workbench/contrib/notebook/common/models/notebookConnection.ts b/src/sql/workbench/contrib/notebook/common/models/notebookConnection.ts deleted file mode 100644 index 55d8fc5560..0000000000 --- a/src/sql/workbench/contrib/notebook/common/models/notebookConnection.ts +++ /dev/null @@ -1,89 +0,0 @@ -/*--------------------------------------------------------------------------------------------- - * Copyright (c) Microsoft Corporation. All rights reserved. - * Licensed under the Source EULA. See License.txt in the project root for license information. - *--------------------------------------------------------------------------------------------*/ - -import { localize } from 'vs/nls'; - -import { IConnectionProfile } from 'sql/platform/connection/common/interfaces'; - -export namespace constants { - export const userPropName = 'user'; - export const knoxPortPropName = 'knoxport'; - export const clusterPropName = 'clustername'; - export const passwordPropName = 'password'; - export const defaultKnoxPort = '30443'; -} -/** - * This is a temporary connection definition, with known properties for Knox gateway connections. - * Long term this should be refactored to an extension contribution - * - * @export - */ -export class NotebookConnection { - private _host: string; - private _knoxPort: string; - - constructor(private _connectionProfile: IConnectionProfile) { - if (!this._connectionProfile) { - throw new Error(localize('connectionInfoMissing', "connectionInfo is required")); - } - } - - public get connectionProfile(): IConnectionProfile { - return this._connectionProfile; - } - - - public get host(): string { - if (!this._host) { - this.ensureHostAndPort(); - } - return this._host; - } - - /** - * Sets host and port values, using any ',' or ':' delimited port in the hostname in - * preference to the built in port. 
- */ - private ensureHostAndPort(): void { - this._host = this.connectionProfile.serverName; - this._knoxPort = NotebookConnection.getKnoxPortOrDefault(this.connectionProfile); - // determine whether the host has either a ',' or ':' in it - this.setHostAndPort(','); - this.setHostAndPort(':'); - } - - // set port and host correctly after we've identified that a delimiter exists in the host name - private setHostAndPort(delimeter: string): void { - let originalHost = this._host; - let index = originalHost.indexOf(delimeter); - if (index > -1) { - this._host = originalHost.slice(0, index); - this._knoxPort = originalHost.slice(index + 1); - } - } - - public get user(): string { - return this._connectionProfile.options[constants.userPropName]; - } - - public get password(): string { - return this._connectionProfile.options[constants.passwordPropName]; - } - - public get knoxport(): string { - if (!this._knoxPort) { - this.ensureHostAndPort(); - } - return this._knoxPort; - } - - private static getKnoxPortOrDefault(connectionProfile: IConnectionProfile): string { - let port = connectionProfile.options[constants.knoxPortPropName]; - if (!port) { - port = constants.defaultKnoxPort; - } - return port; - } -} diff --git a/src/sql/workbench/contrib/notebook/test/browser/notebookActions.test.ts b/src/sql/workbench/contrib/notebook/test/browser/notebookActions.test.ts index e950710473..2c3c570de2 100644 --- a/src/sql/workbench/contrib/notebook/test/browser/notebookActions.test.ts +++ b/src/sql/workbench/contrib/notebook/test/browser/notebookActions.test.ts @@ -472,7 +472,7 @@ suite('Notebook Actions', function (): void { }); test('Should inform user kernel is not supported if Run with Parameters Action is run with unsupported kernels', async function (): Promise { - // Kernels that are supported (Python, PySpark, PowerShell) + // Kernels that are supported (Python, PowerShell) const testContents: azdata.nb.INotebookContents = { cells: [{ @@ -517,7 +517,7 @@ suite('Notebook Actions', function (): void { }); test('Should inform user that run with parameters is not supported for untitled notebooks', async function (): Promise { - // Kernels that are supported (Python, PySpark, PowerShell) + // Kernels that are supported (Python, PowerShell) const untitledUri = URI.parse('untitled:Notebook-0'); const testContents: azdata.nb.INotebookContents = { cells: [{ diff --git a/src/sql/workbench/contrib/notebook/test/electron-browser/cell.test.ts b/src/sql/workbench/contrib/notebook/test/electron-browser/cell.test.ts index 9f47b0acdc..3862b47bd3 100644 --- a/src/sql/workbench/contrib/notebook/test/electron-browser/cell.test.ts +++ b/src/sql/workbench/contrib/notebook/test/electron-browser/cell.test.ts @@ -95,23 +95,6 @@ suite('Cell Model', function (): void { assert.strictEqual(cell.language, 'python'); }); - test('Should set cell language to python if defined as pyspark in languageInfo', async function (): Promise { - let cellData: nb.ICellContents = { - cell_type: CellTypes.Code, - source: 'print(\'1\')', - metadata: { language: 'python' }, - execution_count: 1 - }; - - let notebookModel = new NotebookModelStub({ - name: 'pyspark', - version: '', - mimetype: '' - }); - let cell = factory.createCell(cellData, { notebook: notebookModel, isTrusted: false }); - assert.strictEqual(cell.language, 'python'); - }); - test('Should keep cell language as python if cell has language override', async function (): Promise { let cellData: nb.ICellContents = { cell_type: CellTypes.Code, @@ -121,7 +104,7 @@ suite('Cell Model', 
function (): void { }; let notebookModel = new NotebookModelStub({ - name: 'scala', + name: 'powershell', version: '', mimetype: '' }); @@ -560,7 +543,7 @@ suite('Cell Model', function (): void { notebook: new NotebookModelStub({ name: '', version: '', - mimetype: 'x-scala' + mimetype: '' }), isTrusted: false }); diff --git a/src/sql/workbench/contrib/notebook/test/electron-browser/notebookUtils.test.ts b/src/sql/workbench/contrib/notebook/test/electron-browser/notebookUtils.test.ts index 51faff0acb..3c325a523c 100644 --- a/src/sql/workbench/contrib/notebook/test/electron-browser/notebookUtils.test.ts +++ b/src/sql/workbench/contrib/notebook/test/electron-browser/notebookUtils.test.ts @@ -6,8 +6,8 @@ import * as assert from 'assert'; import * as TypeMoq from 'typemoq'; -import { nb, ServerInfo } from 'azdata'; -import { getHostAndPortFromEndpoint, isStream, getProvidersForFileName, asyncForEach, clusterEndpointsProperty, getClusterEndpoints, RawEndpoint, IEndpoint, getStandardKernelsForProvider, IStandardKernelWithProvider, rewriteUrlUsingRegex } from 'sql/workbench/services/notebook/browser/models/notebookUtils'; +import { nb } from 'azdata'; +import { isStream, getProvidersForFileName, asyncForEach, getStandardKernelsForProvider, IStandardKernelWithProvider } from 'sql/workbench/services/notebook/browser/models/notebookUtils'; import { INotebookService, DEFAULT_NOTEBOOK_PROVIDER, SQL_NOTEBOOK_PROVIDER } from 'sql/workbench/services/notebook/browser/notebookService'; import { NotebookServiceStub } from 'sql/workbench/contrib/notebook/test/stubs'; import { tryMatchCellMagic, extractCellMagicCommandPlusArgs } from 'sql/workbench/services/notebook/browser/utils'; @@ -209,80 +209,6 @@ suite('notebookUtils', function (): void { await asyncForEach([1, 2, 3, 4], undefined); }); - test('getClusterEndpoints Test', async function (): Promise { - let serverInfo = { - options: {} - }; - - serverInfo.options[clusterEndpointsProperty] = undefined; - let result = getClusterEndpoints(serverInfo); - assert.deepStrictEqual(result, []); - - serverInfo.options[clusterEndpointsProperty] = []; - result = getClusterEndpoints(serverInfo); - assert.deepStrictEqual(result, []); - - let testEndpoint = { - serviceName: 'testName', - description: 'testDescription', - endpoint: 'testEndpoint', - protocol: 'testProtocol', - ipAddress: 'testIpAddress', - port: 1433 - }; - serverInfo.options[clusterEndpointsProperty] = [testEndpoint]; - result = getClusterEndpoints(serverInfo); - assert.deepStrictEqual(result, [{ - serviceName: testEndpoint.serviceName, - description: testEndpoint.description, - endpoint: testEndpoint.endpoint, - protocol: testEndpoint.protocol - }]); - - testEndpoint.endpoint = undefined; - result = getClusterEndpoints(serverInfo); - assert.deepStrictEqual(result, [{ - serviceName: testEndpoint.serviceName, - description: testEndpoint.description, - endpoint: 'https://testIpAddress:1433', - protocol: testEndpoint.protocol - }]); - }); - - test('getHostAndPortFromEndpoint Test', async function (): Promise { - let result = getHostAndPortFromEndpoint('https://localhost:1433'); - assert.strictEqual(result.host, 'localhost'); - assert.strictEqual(result.port, '1433'); - - result = getHostAndPortFromEndpoint('tcp://localhost,12345'); - assert.strictEqual(result.host, 'localhost'); - assert.strictEqual(result.port, '12345'); - - result = getHostAndPortFromEndpoint('tcp://localhost'); - assert.strictEqual(result.host, 'localhost'); - assert.strictEqual(result.port, undefined); - - result = 
getHostAndPortFromEndpoint('localhost'); - assert.strictEqual(result.host, ''); - assert.strictEqual(result.port, undefined); - - result = getHostAndPortFromEndpoint('localhost:1433'); - assert.strictEqual(result.host, ''); - assert.strictEqual(result.port, undefined); - }); - - test('rewriteUrlUsingRegex Test', async function (): Promise { - // Give a URL that should be rewritten - let html = 'output).data['text/html'] = html; - } - } - } - } - catch (e) { } - } - return output; - } - public setStdInHandler(handler: nb.MessageHandler): void { this._stdInHandler = handler; } @@ -1129,23 +1092,6 @@ export class CellModel extends Disposable implements ICellModel { this.cellUri = uri; } - // Get Knox endpoint from IConnectionProfile - // TODO: this will be refactored out into the notebooks extension as a contribution point - private getGatewayEndpoint(activeConnection: IConnectionProfile): notebookUtils.IEndpoint { - let endpoint; - if (this._connectionManagementService && activeConnection && activeConnection.providerName.toLowerCase() === notebookConstants.SQL_CONNECTION_PROVIDER.toLowerCase()) { - let serverInfo: ServerInfo = this._connectionManagementService.getServerInfo(activeConnection.id); - if (serverInfo) { - let endpoints: notebookUtils.IEndpoint[] = notebookUtils.getClusterEndpoints(serverInfo); - if (endpoints && endpoints.length > 0) { - endpoint = endpoints.find(ep => ep.serviceName.toLowerCase() === notebookUtils.hadoopEndpointNameGateway); - } - } - } - return endpoint; - } - - private getMultilineSource(source: string | string[]): string | string[] { if (source === undefined) { return []; diff --git a/src/sql/workbench/services/notebook/browser/models/notebookUtils.ts b/src/sql/workbench/services/notebook/browser/models/notebookUtils.ts index 788e5448d1..318a5945c0 100644 --- a/src/sql/workbench/services/notebook/browser/models/notebookUtils.ts +++ b/src/sql/workbench/services/notebook/browser/models/notebookUtils.ts @@ -4,13 +4,10 @@ *--------------------------------------------------------------------------------------------*/ import * as path from 'vs/base/common/path'; -import { nb, ServerInfo } from 'azdata'; +import { nb } from 'azdata'; import { DEFAULT_NOTEBOOK_PROVIDER, INotebookService, SQL_NOTEBOOK_PROVIDER } from 'sql/workbench/services/notebook/browser/notebookService'; -import { URI } from 'vs/base/common/uri'; import { DEFAULT_NOTEBOOK_FILETYPE, NotebookLanguage } from 'sql/workbench/common/constants'; -export const clusterEndpointsProperty = 'clusterEndpoints'; -export const hadoopEndpointNameGateway = 'gateway'; /** * Test whether an output is from a stream. */ @@ -68,13 +65,6 @@ export interface IStandardKernelWithProvider { readonly supportedFileExtensions?: string[]; } -export interface IEndpoint { - serviceName: string; - description: string; - endpoint: string; - protocol: string; -} - export async function asyncForEach(array: any[], callback: Function): Promise { if (array && callback) { for (let index = 0; index < array.length; index++) { @@ -82,67 +72,3 @@ export async function asyncForEach(array: any[], callback: Function): Promise { - // If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS - let endpoint = e.endpoint ? 
e.endpoint : `https://${e.ipAddress}:${e.port}`; - let updatedEndpoint: IEndpoint = { - serviceName: e.serviceName, - description: e.description, - endpoint: endpoint, - protocol: e.protocol - }; - return updatedEndpoint; - }); -} - -export type HostAndIp = { host: string, port: string }; - -export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp { - let authority = URI.parse(endpoint).authority; - let hostAndPortRegex = /^(.*)([,:](\d+))/g; - let match = hostAndPortRegex.exec(authority); - if (match) { - return { - host: match[1], - port: match[3] - }; - } - return { - host: authority, - port: undefined - }; -} - -export function rewriteUrlUsingRegex(regex: RegExp, html: string, host: string, port: string, target: string): string { - return html.replace(regex, function (a, b, c) { - let ret = ''; - if (b !== '') { - ret = 'https://' + host + port + target; - } - if (c !== '') { - ret = ret + c; - } - return ret; - }); -} - -export interface RawEndpoint { - serviceName: string; - description?: string; - endpoint?: string; - protocol?: string; - ipAddress?: string; - port?: number; -} - -export interface IEndpoint { - serviceName: string; - description: string; - endpoint: string; - protocol: string; -} diff --git a/src/sql/workbench/services/notebook/common/notebookConstants.ts b/src/sql/workbench/services/notebook/common/notebookConstants.ts index bfd90e51e1..42b8d7cd46 100644 --- a/src/sql/workbench/services/notebook/common/notebookConstants.ts +++ b/src/sql/workbench/services/notebook/common/notebookConstants.ts @@ -18,9 +18,6 @@ export namespace nbversion { export enum KernelsLanguage { SQL = 'sql', Python = 'python', - PySpark = 'python', - SparkScala = 'scala', - SparkR = 'sparkr', PowerShell = 'powershell', CSharp = 'csharp', FSharp = 'fsharp' diff --git a/src/sql/workbench/services/notebook/common/notebookRegistry.ts b/src/sql/workbench/services/notebook/common/notebookRegistry.ts index f7992fd7fa..28c2d584c6 100644 --- a/src/sql/workbench/services/notebook/common/notebookRegistry.ts +++ b/src/sql/workbench/services/notebook/common/notebookRegistry.ts @@ -86,11 +86,11 @@ let notebookLanguageMagicType: IJSONSchema = { type: 'string' }, executionTarget: { - description: localize('carbon.extension.contributes.notebook.executionTarget', "Optional execution target this magic indicates, for example Spark vs SQL"), + description: localize('carbon.extension.contributes.notebook.executionTarget', "Optional execution target this magic indicates, for example Python vs SQL"), type: 'string' }, kernels: { - description: localize('carbon.extension.contributes.notebook.kernels', "Optional set of kernels this is valid for, e.g. python3, pyspark, sql"), + description: localize('carbon.extension.contributes.notebook.kernels', "Optional set of kernels this is valid for, e.g. python3, sql"), type: 'array', items: { type: 'string'