mirror of https://github.com/ckaczor/azuredatastudio.git (synced 2026-01-14 01:25:37 -05:00)
big data cluster -> Big Data Cluster (#7536)
@@ -215,7 +215,7 @@ We would like to thank all our users who raised issues, and in particular the fo
 ## What's new in this version
 * Announcing the SQL Server 2019 Preview extension.
-* Support for SQL Server 2019 preview features including big data cluster support.
+* Support for SQL Server 2019 preview features including Big Data Cluster support.
 * Azure Data Studio Notebooks
 * The Azure Resource Explorer viewlet lets you browse data-related endpoints for your Azure accounts and create connections to them in Object Explorer. In this release Azure SQL Databases and servers are supported.
 * SQL Server Polybase Create External Table Wizard
@@ -1,6 +1,6 @@
-# Microsoft SQL Server big data cluster Extension for Azure Data Studio
+# Microsoft SQL Server Big Data Cluster Extension for Azure Data Studio

-Welcome to Microsoft SQL Server big data cluster Extension for Azure Data Studio!
+Welcome to Microsoft SQL Server Big Data Cluster Extension for Azure Data Studio!

 ## Code of Conduct
@@ -1,6 +1,6 @@
 {
-"description": "Support for managing SQL Server big data clusters",
-"text.sqlServerBigDataClusters": "SQL Server big data clusters",
+"description": "Support for managing SQL Server Big Data Clusters",
+"text.sqlServerBigDataClusters": "SQL Server Big Data Clusters",
 "command.addController.title": "Connect to Controller",
 "command.deleteController.title" : "Delete",
 "command.refreshController.title" : "Refresh",
@@ -17,7 +17,7 @@ export class AddControllerNode extends TreeNode {
 private readonly nodeType: string;

 constructor() {
-super(localize('textBigDataClusterControllerWithDots', "Add SQL Server big data cluster controller..."));
+super(localize('textBigDataClusterControllerWithDots', "Add SQL Server Big Data Cluster controller..."));
 this.nodeType = BdcItemType.addController;
 }
@@ -45,7 +45,7 @@
 "|Tools|Description|Installation|\n",
 "|---|---|---|\n",
 "|kubectl | Command-line tool for monitoring the underlying Kubernetes cluster | [Installation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-using-native-package-management) |\n",
-"|azdata | Command-line tool for installing and managing a big data cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |\n",
+"|azdata | Command-line tool for installing and managing a Big Data Cluster |[Installation](https://docs.microsoft.com/en-us/sql/big-data-cluster/deploy-install-azdata?view=sqlallproducts-allversions) |\n",
 "|Pandas Package | Python package for data manipulation | Will be installed by the notebook if not present |\n",
 "\n",
 "\n",
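Aside (not part of the diff): the notebook cell above lists the tools the walkthrough depends on. A minimal Python sketch along these lines can confirm they are available before the later cells run; it assumes only the standard library, with kubectl and azdata expected on PATH and pandas possibly not installed yet.

# Illustrative prerequisite check; not taken from the notebook.
import importlib.util
import shutil

for tool in ("kubectl", "azdata"):
    print(tool, "found" if shutil.which(tool) else "missing")

# The notebook installs pandas if it is absent, so "missing" here is not fatal.
print("pandas", "found" if importlib.util.find_spec("pandas") else "missing")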
@@ -123,7 +123,7 @@
 "- **Type** the login info\r\n",
 "- **Press** enter.\r\n",
 "\r\n",
-"If your cluster is missing a configuration file, you will be asked to provide your controller endpoint. (Format: **https://00.00.00.000:00000**) You can find the controller endpoint from the big data cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\r\n",
+"If your cluster is missing a configuration file, you will be asked to provide your controller endpoint. (Format: **https://00.00.00.000:00000**) You can find the controller endpoint from the Big Data Cluster dashboard in the Service Endpoints table. The endpoint is listed as **Cluster Management Service.**\r\n",
 ""
 ],
 "metadata": {}
@@ -154,7 +154,7 @@
 "print('***********')\n",
 "\n",
 "!azdata logout\n",
-"# Login in to your big data cluster \n",
+"# Login in to your Big Data Cluster \n",
 "cmd = f'azdata login -n {cluster_name} -u {controller_username} -a yes'\n",
 "print(\"Start \" + cmd)\n",
 "os.environ['CONTROLLER_USERNAME'] = controller_username\n",
@@ -244,7 +244,7 @@
 " mydata = HTML(df.to_html(render_links=True))\n",
 " display(mydata)\n",
 " \n",
-"# Display status of big data cluster\n",
+"# Display status of Big Data Cluster\n",
 "results = !azdata bdc status show -o json\n",
 "show_results(results)"
 ],
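Aside (not part of the diff): the cell above captures the output of azdata bdc status show -o json with the ! shell magic and passes the lines to show_results, whose body lies outside this hunk. The Python below is only a hedged sketch of what such a helper might do; the shape of the azdata JSON output is an assumption, not taken from the notebook.

# Hypothetical show_results-style helper; the JSON structure is assumed.
import json
import pandas as pd
from IPython.display import HTML, display

def show_results(lines):
    # lines is the list of stdout lines captured by the ! magic.
    payload = json.loads("\n".join(lines))
    df = pd.json_normalize(payload)  # flatten nested fields into columns
    display(HTML(df.to_html(render_links=True)))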
@@ -17,8 +17,8 @@
 "notebook.command.new": "New Notebook",
 "notebook.command.open": "Open Notebook",

-"tab.bigDataClusterDescription": "Tasks and information about your SQL Server big data cluster",
-"title.bigDataCluster": "SQL Server big data cluster",
+"tab.bigDataClusterDescription": "Tasks and information about your SQL Server Big Data Cluster",
+"title.bigDataCluster": "SQL Server Big Data Cluster",
 "title.submitSparkJob": "Submit Spark Job",
 "title.newSparkJob": "New Spark Job",
 "title.openSparkHistory": "View Spark History",
@@ -46,4 +46,4 @@ export function sparkJobSubmissionYarnUIMessage(yarnUIURL: string): string { ret
 export function sparkJobSubmissionSparkHistoryLinkMessage(sparkHistoryLink: string): string { return localize('sparkJobSubmission_SparkHistoryLinkMessage', 'Spark History Url: {0} ', sparkHistoryLink); }
 export function sparkJobSubmissionGetApplicationIdFailed(err: string): string { return localize('sparkJobSubmission_GetApplicationIdFailed', 'Get Application Id Failed. {0}', err); }
 export function sparkJobSubmissionLocalFileNotExisted(path: string): string { return localize('sparkJobSubmission_LocalFileNotExisted', 'Local file {0} does not existed. ', path); }
-export const sparkJobSubmissionNoSqlBigDataClusterFound = localize('sparkJobSubmission_NoSqlBigDataClusterFound', 'No Sql Server big data cluster found.');
+export const sparkJobSubmissionNoSqlBigDataClusterFound = localize('sparkJobSubmission_NoSqlBigDataClusterFound', 'No SQL Server Big Data Cluster found.');
@@ -77,7 +77,7 @@ export class OpenSparkJobSubmissionDialogCommand extends Command {
 selectedHost = await vscode.window.showQuickPick(displayList, {
 placeHolder:
 localize('sparkJobSubmission_PleaseSelectSqlWithCluster',
-"Please select SQL Server with big data cluster.")
+"Please select SQL Server with Big Data Cluster.")
 });
 if (selectedHost === selectConnectionMsg) {
 showConnectionDialog = true;
@@ -107,7 +107,7 @@ export class OpenSparkJobSubmissionDialogCommand extends Command {
 let sqlClusterConnection = await SqlClusterLookUp.getSqlClusterConnection(sqlConnection);
 if (!sqlClusterConnection) {
-throw new Error(localize('errorNotSqlBigDataCluster', "The selected server does not belong to a SQL Server big data cluster"));
+throw new Error(localize('errorNotSqlBigDataCluster', "The selected server does not belong to a SQL Server Big Data Cluster"));
 }

 return new SqlClusterConnection(sqlClusterConnection);
@@ -246,7 +246,7 @@ export class JupyterSession implements nb.ISession {
 if (connection.providerName === SQL_PROVIDER) {
 let clusterEndpoint: utils.IEndpoint = await this.getClusterEndpoint(connection.id, KNOX_ENDPOINT_GATEWAY);
 if (!clusterEndpoint) {
-return Promise.reject(new Error(localize('connectionNotValid', "Spark kernels require a connection to a SQL Server big data cluster master instance.")));
+return Promise.reject(new Error(localize('connectionNotValid', "Spark kernels require a connection to a SQL Server Big Data Cluster master instance.")));
 }
 let hostAndPort = utils.getHostAndPortFromEndpoint(clusterEndpoint.endpoint);
 connection.options[KNOX_ENDPOINT_SERVER] = hostAndPort.host;
@@ -22,7 +22,7 @@ export class AzdataTool extends ToolBase {
 }

 get description(): string {
-return localize('resourceDeployment.AzdataDescription', "A command-line utility written in Python that enables cluster administrators to bootstrap and manage the big data cluster via REST APIs");
+return localize('resourceDeployment.AzdataDescription', "A command-line utility written in Python that enables cluster administrators to bootstrap and manage the Big Data Cluster via REST APIs");
 }

 get type(): ToolType {
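Aside (not part of the diff): the AzdataTool class above describes the azdata CLI for the resource deployment wizard, which checks whether the tool is installed. As a rough illustration only, and not the extension's actual TypeScript implementation, a check like the following could report whether azdata is present and which version it is; it assumes azdata is on PATH and accepts --version, which should be verified against the installed release.

# Illustrative only; azdata flag support is an assumption, not verified here.
import shutil
import subprocess

def azdata_version():
    if shutil.which("azdata") is None:
        return None  # CLI not installed
    result = subprocess.run(["azdata", "--version"], capture_output=True, text=True)
    return result.stdout.strip() or None

print(azdata_version() or "azdata not found")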