Replace Big Data Cluster with big data cluster (#6467)

* Replace Big Data Cluster with big data cluster
  Official docs guidance is to use "big data cluster" instead of "Big Data Cluster".

* Use double quotes and the full product name (see the localize sketch below)
Author: Kevin Cunnane (committed by GitHub)
Date: 2019-07-23 11:54:33 -07:00
Parent: 546f57b473
Commit: b3e8f466ec
32 changed files with 129 additions and 129 deletions
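
The "double quotes" note refers to the localization convention applied throughout this commit: messages passed to localize are switched to double quotes so the full product name, and any apostrophes, can appear without escaping. A minimal sketch of the resulting call, assuming the vscode-nls loadMessageBundle pattern these files already use (the key mirrors one changed below; the variable name is illustrative):

import * as nls from 'vscode-nls';
const localize = nls.loadMessageBundle();

// Double quotes let the message carry apostrophes without escaping, and the
// message spells out the full product name in the approved casing.
const label = localize('title.bigDataCluster', "SQL Server big data cluster");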

View File

@@ -1,6 +1,6 @@
{
"description": "Support for managing SQL Server Big Data Clusters",
"text.sqlServerBigDataClusters": "SQL Server Big Data Clusters",
"description": "Support for managing SQL Server big data clusters",
"text.sqlServerBigDataClusters": "SQL Server big data clusters",
"command.addController.title": "Connect to Controller",
"command.deleteController.title" : "Delete",
"command.refreshController.title" : "Refresh"

View File

@@ -17,7 +17,7 @@ export class AddControllerNode extends TreeNode {
private readonly nodeType: string;
constructor() {
- super(localize('textBigDataClusterControllerWithDots', 'Add Big Data Cluster Controller...'));
+ super(localize('textBigDataClusterControllerWithDots', "Add SQL Server big data cluster controller..."));
this.nodeType = BdcItemType.addController;
}

View File

@@ -22,7 +22,7 @@
"cells": [
{
"cell_type": "markdown",
"source": "![11811317_10153406249401648_2787740058697948111_n](https://raw.githubusercontent.com/Microsoft/sqlworkshops/master/graphics/solutions-microsoft-logo-small.png)\n\n# View the status of your big data cluster\nThis notebook allows you to see the status of the controller, master instance, and pools in your SQL Server big data cluster.\n\n## <span style=\"color:red\">Important Instructions</span>\n### **Before you begin, you will need:**\n* Big data cluster name\n* Controller username\n* Controller password\n* Controller endpoint \n\nYou can find the controller endpoint from the big data cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\n\nIf you do not know the credentials, ask the admin who deployed your cluster.\n\n### **Prerequisites**\nEnsure the following tools are installed and added to PATH before proceeding.\n\n|Tools|Description|Installation|\n|---|---|---|\n|kubectl | Command-line tool for monitoring the underlying Kuberentes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n|azdata | Command-line tool for installing and managing a big data cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |\n\n### **Instructions**\n* For the best experience, click **Run Cells** on the toolbar above. This will automatically execute all code cells below and show the cluster status in each table.\n* When you click **Run Cells** for this Notebook, you will be prompted at the *Log in to your big data cluster* code cell to provide your login credentials. Follow the prompts and press enter to proceed.\n* **You won't need to modify any of the code cell contents** in this Notebook. If you accidentally made a change, you can reopen this Notebook from the bdc dashboard.\n\n\n",
"source": "![11811317_10153406249401648_2787740058697948111_n](https://raw.githubusercontent.com/Microsoft/sqlworkshops/master/graphics/solutions-microsoft-logo-small.png)\n\n# View the status of your SQL Server big data cluster\nThis notebook allows you to see the status of the controller, master instance, and pools in your SQL Server big data cluster.\n\n## <span style=\"color:red\">Important Instructions</span>\n### **Before you begin, you will need:**\n* Big data cluster name\n* Controller username\n* Controller password\n* Controller endpoint \n\nYou can find the controller endpoint from the SQL Server big data cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\n\nIf you do not know the credentials, ask the admin who deployed your cluster.\n\n### **Prerequisites**\nEnsure the following tools are installed and added to PATH before proceeding.\n\n|Tools|Description|Installation|\n|---|---|---|\n|kubectl | Command-line tool for monitoring the underlying Kuberentes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n|azdata | Command-line tool for installing and managing a SQL Server big data cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |\n\n### **Instructions**\n* For the best experience, click **Run Cells** on the toolbar above. This will automatically execute all code cells below and show the cluster status in each table.\n* When you click **Run Cells** for this Notebook, you will be prompted at the *Log in to your SQL Server big data cluster* code cell to provide your login credentials. Follow the prompts and press enter to proceed.\n* **You won't need to modify any of the code cell contents** in this Notebook. If you accidentally made a change, you can reopen this Notebook from the bdc dashboard.\n\n\n",
"metadata": {}
},
{
@@ -51,24 +51,24 @@
},
{
"cell_type": "markdown",
"source": "## **Log in to your big data cluster**\r\nTo view cluster status, you will need to connect to your big data cluster through mssqlctl. \r\n\r\nWhen you run this code cell, you will be prompted for:\r\n- Cluster name\r\n- Controller username\r\n- Controller password\r\n\r\nTo proceed:\r\n- **Click** on the input box\r\n- **Type** the login info\r\n- **Press** enter.\r\n\r\nIf your cluster is missing a configuration file, you will be asked to provide your controller endpoint. (Format: **https://00.00.00.000:00000**)",
"source": "## **Log in to your SQL Server big data cluster**\r\nTo view cluster status, you will need to connect to your SQL Server big data cluster through mssqlctl. \r\n\r\nWhen you run this code cell, you will be prompted for:\r\n- Cluster name\r\n- Controller username\r\n- Controller password\r\n\r\nTo proceed:\r\n- **Click** on the input box\r\n- **Type** the login info\r\n- **Press** enter.\r\n\r\nIf your cluster is missing a configuration file, you will be asked to provide your controller endpoint. (Format: **https://00.00.00.000:00000**)",
"metadata": {}
},
{
"cell_type": "code",
"source": "import os, getpass, json\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import *\n\ndef PromptForInfo(promptMsg, isPassword, errorMsg):\n if isPassword:\n promptResponse = getpass.getpass(prompt=promptMsg)\n else:\n promptResponse = input(promptMsg)\n if promptResponse == \"\":\n raise SystemExit(errorMsg + '\\n')\n return promptResponse\n\n# Prompt user inputs:\ncluster_name = PromptForInfo('Please provide your Cluster Name: ', False, 'Cluster Name is required!')\n\ncontroller_username = PromptForInfo('Please provide your Controller Username for login: ', False, 'Controller Username is required!')\n\ncontroller_password = PromptForInfo('Controller Password: ', True, 'Password is required!')\nprint('***********')\n\n!azdata logout\n# Login in to your big data cluster \ncmd = f'azdata login -n {cluster_name} -u {controller_username} -a yes'\nprint(\"Start \" + cmd)\nos.environ['CONTROLLER_USERNAME'] = controller_username\nos.environ['CONTROLLER_PASSWORD'] = controller_password\nos.environ['ACCEPT_EULA'] = 'yes'\n\nloginResult = !{cmd}\nif 'ERROR: Please check your kube config or specify the correct controller endpoint with: --controller-endpoint https://<ip>:<port>.' in loginResult[0] or 'ERROR' in loginResult[0]:\n controller_ip = input('Please provide your Controller endpoint: ')\n if controller_ip == \"\":\n raise SystemExit(f'Controller IP is required!' + '\\n')\n else:\n cmd = f'azdata login -n {cluster_name} -e {controller_ip} -u {controller_username} -a yes'\n loginResult = !{cmd}\nprint(loginResult)",
"source": "import os, getpass, json\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import *\n\ndef PromptForInfo(promptMsg, isPassword, errorMsg):\n if isPassword:\n promptResponse = getpass.getpass(prompt=promptMsg)\n else:\n promptResponse = input(promptMsg)\n if promptResponse == \"\":\n raise SystemExit(errorMsg + '\\n')\n return promptResponse\n\n# Prompt user inputs:\ncluster_name = PromptForInfo('Please provide your Cluster Name: ', False, 'Cluster Name is required!')\n\ncontroller_username = PromptForInfo('Please provide your Controller Username for login: ', False, 'Controller Username is required!')\n\ncontroller_password = PromptForInfo('Controller Password: ', True, 'Password is required!')\nprint('***********')\n\n!azdata logout\n# Login in to your SQL Server big data cluster \ncmd = f'azdata login -n {cluster_name} -u {controller_username} -a yes'\nprint(\"Start \" + cmd)\nos.environ['CONTROLLER_USERNAME'] = controller_username\nos.environ['CONTROLLER_PASSWORD'] = controller_password\nos.environ['ACCEPT_EULA'] = 'yes'\n\nloginResult = !{cmd}\nif 'ERROR: Please check your kube config or specify the correct controller endpoint with: --controller-endpoint https://<ip>:<port>.' in loginResult[0] or 'ERROR' in loginResult[0]:\n controller_ip = input('Please provide your Controller endpoint: ')\n if controller_ip == \"\":\n raise SystemExit(f'Controller IP is required!' + '\\n')\n else:\n cmd = f'azdata login -n {cluster_name} -e {controller_ip} -u {controller_username} -a yes'\n loginResult = !{cmd}\nprint(loginResult)",
"metadata": {},
"outputs": [],
"execution_count": 0
},
{
"cell_type": "markdown",
"source": "## **Status of big data cluster**\r\nAfter you successfully login to your bdc, you can view the overall status of each container before drilling down into each component.",
"source": "## **Status of SQL Server big data cluster**\r\nAfter you successfully login to your bdc, you can view the overall status of each container before drilling down into each component.",
"metadata": {}
},
{
"cell_type": "code",
"source": "# Display status of big data cluster\ndef formatColumnNames(column):\n return ' '.join(word[0].upper() + word[1:] for word in column.split())\n\npd.set_option('display.max_colwidth', -1)\ndef show_results(input):\n input = ''.join(input)\n results = json.loads(input)\n df = pd.DataFrame(results)\n df.columns = [formatColumnNames(n) for n in results[0].keys()]\n mydata = HTML(df.to_html(render_links=True))\n display(mydata)\n\nresults = !azdata bdc status show\nstrRes = ''.join(results)\njsonRes = json.loads(strRes)\ndtypes = '{'\nspark = [x for x in jsonRes if x['kind'] == 'Spark']\nif spark:\n spark_exists = True\nelse:\n spark_exists = False\nshow_results(results)",
"source": "# Display status of SQL Server big data cluster\ndef formatColumnNames(column):\n return ' '.join(word[0].upper() + word[1:] for word in column.split())\n\npd.set_option('display.max_colwidth', -1)\ndef show_results(input):\n input = ''.join(input)\n results = json.loads(input)\n df = pd.DataFrame(results)\n df.columns = [formatColumnNames(n) for n in results[0].keys()]\n mydata = HTML(df.to_html(render_links=True))\n display(mydata)\n\nresults = !azdata bdc status show\nstrRes = ''.join(results)\njsonRes = json.loads(strRes)\ndtypes = '{'\nspark = [x for x in jsonRes if x['kind'] == 'Spark']\nif spark:\n spark_exists = True\nelse:\n spark_exists = False\nshow_results(results)",
"metadata": {},
"outputs": [],
"execution_count": 0

View File

@@ -16,8 +16,8 @@
"notebook.command.new": "New Notebook",
"notebook.command.open": "Open Notebook",
"tab.bigDataClusterDescription": "Tasks and information about your SQL Server Big Data Cluster",
"title.bigDataCluster": "SQL Server Big Data Cluster",
"tab.bigDataClusterDescription": "Tasks and information about your SQL Server big data cluster",
"title.bigDataCluster": "SQL Server big data cluster",
"title.submitSparkJob": "Submit Spark Job",
"title.newSparkJob": "New Spark Job",
"title.openSparkHistory": "View Spark History",

View File

@@ -24,4 +24,4 @@ export function sparkJobSubmissionYarnUIMessage(yarnUIURL: string): string { ret
export function sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryLink: string): string { return localize('sparkJobSubmission_SparkHistoryLinkMessage', 'Spark History Url: {0} ', sparkHistoryLink); }
export function sparkJobSubmissionGetApplicationIdFailed(err: string): string { return localize('sparkJobSubmission_GetApplicationIdFailed', 'Get Application Id Failed. {0}', err); }
export function sparkJobSubmissionLocalFileNotExisted(path: string): string { return localize('sparkJobSubmission_LocalFileNotExisted', 'Local file {0} does not existed. ', path); }
- export const sparkJobSubmissionNoSqlBigDataClusterFound = localize('sparkJobSubmission_NoSqlBigDataClusterFound', 'No Sql Server Big Data Cluster found.');
+ export const sparkJobSubmissionNoSqlBigDataClusterFound = localize('sparkJobSubmission_NoSqlBigDataClusterFound', 'No Sql Server big data cluster found.');

View File

@@ -63,7 +63,7 @@ export class OpenSparkJobSubmissionDialogCommand extends Command {
let selectedHost: string = await vscode.window.showQuickPick(displayList, {
placeHolder:
localize('sparkJobSubmission_PleaseSelectSqlWithCluster',
- 'Please select SQL Server with Big Data Cluster. ')
+ "Please select SQL Server with big data cluster.")
});
let errorMsg = localize('sparkJobSubmission_NoSqlSelected', 'No Sql Server is selected.');
if (!selectedHost) { throw new Error(errorMsg); }

View File

@@ -246,7 +246,7 @@ export class JupyterSession implements nb.ISession {
await this.getClusterEndpoint(connection.id, KNOX_ENDPOINT_KNOX) ||
await this.getClusterEndpoint(connection.id, KNOX_ENDPOINT_GATEWAY);
if (!clusterEndpoint) {
- return Promise.reject(new Error(localize('connectionNotValid', 'Spark kernels require a connection to a SQL Server big data cluster master instance.')));
+ return Promise.reject(new Error(localize('connectionNotValid', "Spark kernels require a connection to a SQL Server big data cluster master instance.")));
}
connection.options[KNOX_ENDPOINT_SERVER] = clusterEndpoint.ipAddress;
connection.options[KNOX_ENDPOINT_PORT] = clusterEndpoint.port;

View File

@@ -13,7 +13,7 @@ export class AzdataTool implements ITool {
}
get description(): string {
- return localize('resourceDeployment.AzdataDescription', 'A command-line utility written in Python that enables cluster administrators to bootstrap and manage the big data cluster via REST APIs');
+ return localize('resourceDeployment.AzdataDescription', "A command-line utility written in Python that enables cluster administrators to bootstrap and manage the big data cluster via REST APIs");
}
get type(): ToolType {
@@ -21,6 +21,6 @@ export class AzdataTool implements ITool {
}
get displayName(): string {
- return localize('resourceDeployment.AzdataDisplayName', 'azdata');
+ return localize('resourceDeployment.AzdataDisplayName', "azdata");
}
}