Initial AD support for BDCs (#6741)
Partially working AD support for BDCs, with some known issues:
- Plumbed Kerberos support through to Notebooks.
- Using "gateway-0" for the service temporarily, as the service endpoints API doesn't yet return the correct DNS name. Will update in a separate PR once it's available.
- Plumbed Kerberos auth to HDFS and Spark. Only partially working, as we use the same token on each call. Will fix in a separate PR, since this requires a refactor of the WebHDFS library: we will need to either get a new token every time or set a cookie, both of which require refactors.
- Fixed an error where a failed Data Services node expansion blocked all Object Explorer expansion.
- Support for the SqlToolsService change to use the new cluster endpoints DMV:
  - Updated the API to add a new endpoints field that replaces IP + port.
  - Added logic to handle the case where endpoints for Yarn, Grafana, etc. are in the list.
  - Sorted the list and used the expected new localized strings.
  - Updated SqlToolsService to include support for the new DMV.
- Added "gateway-0" handling in the Jupyter session as a workaround for the lack of domain names in the endpoints list.
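For context, a minimal sketch (not part of the commit) of the Negotiate flow this change wires up. authenticateKerberos and the 'kerberos' npm package match the new util/auth.ts in the diff below; the requestWithKerberos wrapper and its fetch call are hypothetical, shown only to illustrate the per-call token issue noted above.

	import * as kerberos from 'kerberos';

	async function authenticateKerberos(hostname: string): Promise<string> {
		// SPN format differs by platform: SSPI on Windows expects 'HTTP/host',
		// GSSAPI elsewhere expects 'HTTP@host'.
		const service = 'HTTP' + (process.platform === 'win32' ? '/' : '@') + hostname;
		const client = await kerberos.initializeClient(service, { mechOID: kerberos.GSS_MECH_OID_KRB5 });
		// One SPNEGO step yields a base64 token for the Authorization header.
		return client.step('');
	}

	// Hypothetical wrapper: each HTTP call should negotiate a fresh token.
	// Reusing one token across calls is the known issue called out above.
	async function requestWithKerberos(host: string, path: string) {
		const token = await authenticateKerberos(host);
		return fetch('https://' + host + path, {
			headers: { Authorization: 'Negotiate ' + token }
		});
	}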
@@ -1,6 +1,6 @@
 {
     "downloadUrl": "https://github.com/Microsoft/sqltoolsservice/releases/download/v{#version#}/microsoft.sqltools.servicelayer-{#fileName#}",
-    "version": "2.0.0-release.2",
+    "version": "2.0.0-release.4",
     "downloadFileNames": {
         "Windows_86": "win-x86-netcoreapp2.2.zip",
         "Windows_64": "win-x64-netcoreapp2.2.zip",
@@ -13,9 +13,10 @@ export const extensionConfigSectionName = 'mssql';
 // DATA PROTOCOL VALUES ///////////////////////////////////////////////////////////
 export const mssqlClusterProviderName = 'mssqlCluster';
 export const hadoopEndpointNameKnox = 'Knox';
 export const hadoopEndpointNameGateway = 'gateway';
 export const protocolVersion = '1.0';
 export const authenticationTypePropName = 'authenticationType';
+export const integratedAuth = 'integrated';
 export const hostPropName = 'host';
 export const userPropName = 'user';
 export const knoxPortPropName = 'knoxport';
@@ -8,46 +8,74 @@ import * as azdata from 'azdata';
 import * as nls from 'vscode-nls';
 const localize = nls.loadMessageBundle();
 
-import * as Utils from '../utils';
+import * as utils from '../utils';
+
+const mgmtProxyName = 'mgmtproxy';
+const grafanaEndpointName = 'metricsui';
+const grafanaDescription = localize('grafana', "Metrics Dashboard");
+const logsuiEndpointName = 'logsui';
+const logsuiDescription = localize('kibana', "Log Search Dashboard");
+const sparkHistoryEndpointName = 'spark-history';
+const sparkHistoryDescription = localize('sparkHistory', "Spark Jobs Management and Monitoring Dashboard");
+const yarnUiEndpointName = 'yarn-ui';
+const yarnHistoryDescription = localize('yarnHistory', "Spark Diagnostics and Monitoring Dashboard");
+const hyperlinkedEndpoints = [grafanaEndpointName, logsuiEndpointName, sparkHistoryEndpointName, yarnUiEndpointName];
 
 export function registerServiceEndpoints(context: vscode.ExtensionContext): void {
 	azdata.ui.registerModelViewProvider('bdc-endpoints', async (view) => {
-		const endpointsArray: Array<Utils.IEndpoint> = Object.assign([], view.serverInfo.options['clusterEndpoints']);
-		endpointsArray.forEach(endpointInfo => {
-			endpointInfo.hyperlink = 'https://' + endpointInfo.ipAddress + ':' + endpointInfo.port;
-		});
+		let endpointsArray: Array<utils.IEndpoint> = Object.assign([], utils.getClusterEndpoints(view.serverInfo));
 		if (endpointsArray.length > 0) {
-			const managementProxyEp = endpointsArray.find(e => e.serviceName === 'management-proxy' || e.serviceName === 'mgmtproxy');
-			if (managementProxyEp) {
-				endpointsArray.push(getCustomEndpoint(managementProxyEp, localize("grafana", "Metrics Dashboard"), '/grafana/d/wZx3OUdmz'));
-				endpointsArray.push(getCustomEndpoint(managementProxyEp, localize("kibana", "Log Search Dashboard"), '/kibana/app/kibana#/discover'));
+			const grafanaEp = endpointsArray.find(e => e.serviceName === grafanaEndpointName);
+			if (grafanaEp) {
+				// Update to have correct URL
+				grafanaEp.endpoint += '/d/wZx3OUdmz';
+			}
+			const kibanaEp = endpointsArray.find(e => e.serviceName === logsuiEndpointName);
+			if (kibanaEp) {
+				// Update to have correct URL
+				kibanaEp.endpoint += '/app/kibana#/discover';
 			}
 
-			const gatewayEp = endpointsArray.find(e => e.serviceName === 'gateway');
-			if (gatewayEp) {
-				endpointsArray.push(getCustomEndpoint(gatewayEp, localize("sparkHostory", "Spark Job Monitoring"), '/gateway/default/sparkhistory'));
-				endpointsArray.push(getCustomEndpoint(gatewayEp, localize("yarnHistory", "Spark Resource Management"), '/gateway/default/yarn'));
+			if (!grafanaEp) {
+				// We are on older CTP, need to manually add some endpoints.
+				// TODO remove once CTP support goes away
+				const managementProxyEp = endpointsArray.find(e => e.serviceName === mgmtProxyName);
+				if (managementProxyEp) {
+					endpointsArray.push(getCustomEndpoint(managementProxyEp, grafanaEndpointName, grafanaDescription, '/grafana/d/wZx3OUdmz'));
+					endpointsArray.push(getCustomEndpoint(managementProxyEp, logsuiEndpointName, logsuiDescription, '/kibana/app/kibana#/discover'));
+				}
+
+				const gatewayEp = endpointsArray.find(e => e.serviceName === 'gateway');
+				if (gatewayEp) {
+					endpointsArray.push(getCustomEndpoint(gatewayEp, sparkHistoryEndpointName, sparkHistoryDescription, '/gateway/default/sparkhistory'));
+					endpointsArray.push(getCustomEndpoint(gatewayEp, yarnUiEndpointName, yarnHistoryDescription, '/gateway/default/yarn'));
+				}
 			}
 
+			endpointsArray = endpointsArray.map(e => {
+				e.description = getFriendlyEndpointNames(e);
+				return e;
+			}).sort((a, b) => a.endpoint.localeCompare(b.endpoint));
+
 			const container = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'column', width: '100%', height: '100%', alignItems: 'left' }).component();
 			endpointsArray.forEach(endpointInfo => {
 				const endPointRow = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'row' }).component();
-				const nameCell = view.modelBuilder.text().withProperties<azdata.TextComponentProperties>({ value: getFriendlyEndpointNames(endpointInfo.serviceName) }).component();
+				const nameCell = view.modelBuilder.text().withProperties<azdata.TextComponentProperties>({ value: endpointInfo.description }).component();
 				endPointRow.addItem(nameCell, { CSSStyles: { 'width': '35%', 'font-weight': '600', 'user-select': 'text' } });
-				if (endpointInfo.isHyperlink) {
-					const linkCell = view.modelBuilder.hyperlink().withProperties<azdata.HyperlinkComponentProperties>({ label: endpointInfo.hyperlink, url: endpointInfo.hyperlink }).component();
+				if (hyperlinkedEndpoints.findIndex(e => e === endpointInfo.serviceName) >= 0) {
+					const linkCell = view.modelBuilder.hyperlink().withProperties<azdata.HyperlinkComponentProperties>({ label: endpointInfo.endpoint, url: endpointInfo.endpoint }).component();
 					endPointRow.addItem(linkCell, { CSSStyles: { 'width': '62%', 'color': '#0078d4', 'text-decoration': 'underline', 'padding-top': '10px' } });
 				}
 				else {
-					const endpointCell = view.modelBuilder.text().withProperties<azdata.TextComponentProperties>({ value: endpointInfo.ipAddress + ':' + endpointInfo.port }).component();
+					const endpointCell = view.modelBuilder.text().withProperties<azdata.TextComponentProperties>({ value: endpointInfo.endpoint }).component();
 					endPointRow.addItem(endpointCell, { CSSStyles: { 'width': '62%', 'user-select': 'text' } });
 				}
 				const copyValueCell = view.modelBuilder.button().component();
 				copyValueCell.iconPath = { light: context.asAbsolutePath('resources/light/copy.png'), dark: context.asAbsolutePath('resources/dark/copy_inverse.png') };
 				copyValueCell.onDidClick(() => {
-					vscode.env.clipboard.writeText(endpointInfo.hyperlink);
+					vscode.env.clipboard.writeText(endpointInfo.endpoint);
 				});
 				copyValueCell.title = localize("copyText", "Copy");
 				copyValueCell.iconHeight = '14px';
@@ -64,40 +92,57 @@ export function registerServiceEndpoints(context: vscode.ExtensionContext): void
 		});
 	}
 
-function getCustomEndpoint(parentEndpoint: Utils.IEndpoint, serviceName: string, serviceUrl?: string): Utils.IEndpoint {
+function getCustomEndpoint(parentEndpoint: utils.IEndpoint, serviceName: string, description: string, serviceUrl?: string): utils.IEndpoint {
 	if (parentEndpoint) {
-		let endpoint: Utils.IEndpoint = {
+		let endpoint: utils.IEndpoint = {
 			serviceName: serviceName,
-			ipAddress: parentEndpoint.ipAddress,
-			port: parentEndpoint.port,
-			isHyperlink: serviceUrl ? true : false,
-			hyperlink: 'https://' + parentEndpoint.ipAddress + ':' + parentEndpoint.port + serviceUrl
+			description: description,
+			endpoint: parentEndpoint.endpoint + serviceUrl,
+			protocol: 'https'
 		};
 		return endpoint;
 	}
 	return null;
 }
 
-function getFriendlyEndpointNames(name: string): string {
-	let friendlyName: string = name;
-	switch (name) {
+function getFriendlyEndpointNames(endpointInfo: utils.IEndpoint): string {
+	let friendlyName: string = endpointInfo.description || endpointInfo.serviceName;
+	switch (endpointInfo.serviceName) {
 		case 'app-proxy':
-			friendlyName = localize("appproxy", "Application Proxy");
+			friendlyName = localize('approxy.description', "Application Proxy");
 			break;
 		case 'controller':
-			friendlyName = localize("controller", "Cluster Management Service");
+			friendlyName = localize('controller.description', "Cluster Management Service");
 			break;
 		case 'gateway':
-			friendlyName = localize("gateway", "HDFS and Spark");
+			friendlyName = localize('gateway.description', "HDFS and Spark");
 			break;
-		case 'management-proxy':
-			friendlyName = localize("managementproxy", "Management Proxy");
+		case mgmtProxyName:
+			friendlyName = localize('mgmtproxy.description', "Management Proxy");
 			break;
-		case 'mgmtproxy':
-			friendlyName = localize("mgmtproxy", "Management Proxy");
+		case logsuiEndpointName:
+			friendlyName = logsuiDescription;
 			break;
+		case grafanaEndpointName:
+			friendlyName = grafanaDescription;
+			break;
+		case sparkHistoryEndpointName:
+			friendlyName = sparkHistoryDescription;
+			break;
+		case yarnUiEndpointName:
+			friendlyName = yarnHistoryDescription;
+			break;
+		case 'sql-server-master':
+			friendlyName = localize('sqlmaster.description', "SQL Server Master Instance Front-End");
+			break;
+		case 'webhdfs':
+			friendlyName = localize('webhdfs.description', "HDFS File System Proxy");
+			break;
+		case 'livy':
+			friendlyName = localize('livy.description', "Proxy for running Spark statements, jobs, applications");
+			break;
 		default:
 			break;
 	}
 	return friendlyName;
 }
@@ -26,7 +26,6 @@ export class SqlClusterConnection {
 			this._connection = this.toConnection(this._profile);
 		} else {
 			this._connection = connectionInfo;
-			this._profile = this.toConnectionProfile(this._connection);
 		}
 		this._host = this._connection.options[constants.hostPropName];
 		this._port = this._connection.options[constants.knoxPortPropName];
@@ -35,7 +34,6 @@ export class SqlClusterConnection {
 	}
-
 	public get connection(): azdata.connection.Connection { return this._connection; }
 	public get profile(): azdata.IConnectionProfile { return this._profile; }
 	public get host(): string { return this._host; }
 	public get port(): number { return this._port ? Number.parseInt(this._port) : constants.defaultKnoxPort; }
 	public get user(): string { return this._user; }
@@ -50,7 +48,7 @@ export class SqlClusterConnection {
 			.every(e => options1[e] === options2[e]);
 	}
 
-	public createHdfsFileSource(): IFileSource {
+	public async createHdfsFileSource(): Promise<IFileSource> {
 		let options: IHdfsOptions = {
 			protocol: 'https',
 			host: this.host,
@@ -58,13 +56,24 @@ export class SqlClusterConnection {
 			user: this.user,
 			path: 'gateway/default/webhdfs/v1',
 			requestParams: {
-				auth: {
-					user: this.user,
-					pass: this.password
-				}
 			}
 		};
-		return FileSourceFactory.instance.createHdfsFileSource(options);
+		if (this.isIntegratedAuth()) {
+			options.requestParams.isKerberos = this.isIntegratedAuth();
+			options.requestParams.auth = undefined;
+		} else {
+			options.requestParams.auth = {
+				user: this.user,
+				pass: this.password
+			};
+		}
+		let fileSource = await FileSourceFactory.instance.createHdfsFileSource(options);
+		return fileSource;
 	}
 
+	public isIntegratedAuth(): boolean {
+		let authType: string = this._connection.options[constants.authenticationTypePropName];
+		return authType && authType.toLowerCase() === constants.integratedAuth;
+	}
+
 	public updatePassword(password: string): void {
@@ -90,10 +99,12 @@ export class SqlClusterConnection {
 
 	private getMissingProperties(connectionInfo: azdata.ConnectionInfo): string[] {
 		if (!connectionInfo || !connectionInfo.options) { return undefined; }
-		return [
-			constants.hostPropName, constants.knoxPortPropName,
-			constants.userPropName, constants.passwordPropName
-		].filter(e => connectionInfo.options[e] === undefined);
+		let requiredProps = [constants.hostPropName, constants.knoxPortPropName];
+		let authType = connectionInfo.options[constants.authenticationTypePropName] && connectionInfo.options[constants.authenticationTypePropName].toLowerCase();
+		if (authType !== constants.integratedAuth) {
+			requiredProps.push(constants.userPropName, constants.passwordPropName);
+		}
+		return requiredProps.filter(e => connectionInfo.options[e] === undefined);
 	}
 
 	private toConnection(connProfile: azdata.IConnectionProfile): azdata.connection.Connection {
@@ -101,18 +112,4 @@ export class SqlClusterConnection {
 			{ connectionId: this._profile.id });
 		return connection;
 	}
-
-	private toConnectionProfile(connectionInfo: azdata.connection.Connection): azdata.IConnectionProfile {
-		let options = connectionInfo.options;
-		let connProfile: azdata.IConnectionProfile = Object.assign(<azdata.IConnectionProfile>{},
-			connectionInfo,
-			{
-				serverName: `${options[constants.hostPropName]},${options[constants.knoxPortPropName]}`,
-				userName: options[constants.userPropName],
-				password: options[constants.passwordPropName],
-				id: connectionInfo.connectionId,
-			}
-		);
-		return connProfile;
-	}
 }
@@ -16,6 +16,7 @@ import * as nls from 'vscode-nls';
 
 import * as constants from '../constants';
 import { WebHDFS, HdfsError } from './webhdfs';
+import * as auth from '../util/auth';
 
 const localize = nls.loadMessageBundle();
 
@@ -84,11 +85,13 @@ export interface IHdfsOptions {
 
 export interface IRequestParams {
 	auth?: IHttpAuthentication;
+	isKerberos?: boolean;
 	/**
 	 * Timeout in milliseconds to wait for response
 	 */
 	timeout?: number;
 	agent?: https.Agent;
+	headers?: {};
 }
 
 export interface IHdfsFileStatus {
@@ -106,10 +109,10 @@ export class FileSourceFactory {
 		return FileSourceFactory._instance;
 	}
 
-	public createHdfsFileSource(options: IHdfsOptions): IFileSource {
+	public async createHdfsFileSource(options: IHdfsOptions): Promise<IFileSource> {
 		options = options && options.host ? FileSourceFactory.removePortFromHost(options) : options;
 		let requestParams: IRequestParams = options.requestParams ? options.requestParams : {};
-		if (requestParams.auth) {
+		if (requestParams.auth || requestParams.isKerberos) {
 			// TODO Remove handling of unsigned cert once we have real certs in our Knox service
 			let agentOptions = {
 				host: options.host,
@@ -119,6 +122,11 @@ export class FileSourceFactory {
 			};
 			let agent = new https.Agent(agentOptions);
 			requestParams['agent'] = agent;
 		}
+
+		if (requestParams.isKerberos) {
+			let kerberosToken = await auth.authenticateKerberos(options.host);
+			requestParams.headers = { Authorization: `Negotiate ${kerberosToken}` };
+		}
 		return new HdfsFileSource(WebHDFS.createClient(options, requestParams));
 	}
 
@@ -63,9 +63,9 @@ export class HdfsProvider implements vscode.TreeDataProvider<TreeNode>, ITreeCha
 		}
 	}
 
-	addHdfsConnection(options: IHdfsOptions): void {
+	public async addHdfsConnection(options: IHdfsOptions): Promise<void> {
 		let displayName = `${options.user}@${options.host}:${options.port}`;
-		let fileSource = FileSourceFactory.instance.createHdfsFileSource(options);
+		let fileSource = await FileSourceFactory.instance.createHdfsFileSource(options);
 		this.addConnection(displayName, fileSource);
 	}
 
@@ -120,10 +120,10 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
 			if (children.length === 1 && this.hasExpansionError(children)) {
 				if (children[0].errorStatusCode === 401) {
 					//Prompt for password
-					let password: string = await this.promptPassword(localize('prmptPwd', 'Please provide the password to connect to HDFS:'));
+					let password: string = await this.promptPassword(localize('prmptPwd', "Please provide the password to connect to HDFS:"));
 					if (password && password.length > 0) {
 						session.sqlClusterConnection.updatePassword(password);
-						node.updateFileSource(session.sqlClusterConnection);
+						await node.updateFileSource(session.sqlClusterConnection);
 						children = await node.getChildren(true);
 					}
 				}
@@ -181,7 +181,7 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
 		try {
 			let session = this.getSqlClusterSessionForNode(node);
 			if (!session) {
-				this.appContext.apiWrapper.showErrorMessage(localize('sessionNotFound', 'Session for node {0} does not exist', node.nodePathValue));
+				this.appContext.apiWrapper.showErrorMessage(localize('sessionNotFound', "Session for node {0} does not exist", node.nodePathValue));
 			} else {
 				let nodeInfo = node.getNodeInfo();
 				let expandInfo: azdata.ExpandNodeInfo = {
@@ -191,7 +191,7 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
 				await this.refreshNode(expandInfo);
 			}
 		} catch (err) {
-			mssqlOutputChannel.appendLine(localize('notifyError', 'Error notifying of node change: {0}', err));
+			mssqlOutputChannel.appendLine(localize('notifyError', "Error notifying of node change: {0}", err));
 		}
 	}
 
@@ -295,7 +295,7 @@ class SqlClusterRootNode extends TreeNode {
 
 	getNodeInfo(): azdata.NodeInfo {
 		let nodeInfo: azdata.NodeInfo = {
-			label: localize('rootLabel', 'Root'),
+			label: localize('rootLabel', "Root"),
 			isLeaf: false,
 			errorMessage: undefined,
 			metadata: undefined,
@@ -325,22 +325,27 @@ class DataServicesNode extends TreeNode {
 
 	public getChildren(refreshChildren: boolean): TreeNode[] | Promise<TreeNode[]> {
 		if (refreshChildren || !this._children) {
-			this._children = [];
-			let fileSource: IFileSource = this.session.sqlClusterConnection.createHdfsFileSource();
-			let hdfsNode = new ConnectionNode(this._context, localize('hdfsFolder', 'HDFS'), fileSource);
-			hdfsNode.parent = this;
-			this._children.push(hdfsNode);
+			return this.refreshChildren();
 		}
 		return this._children;
 	}
 
+	private async refreshChildren(): Promise<TreeNode[]> {
+		this._children = [];
+		let fileSource: IFileSource = await this.session.sqlClusterConnection.createHdfsFileSource();
+		let hdfsNode = new ConnectionNode(this._context, localize('hdfsFolder', "HDFS"), fileSource);
+		hdfsNode.parent = this;
+		this._children.push(hdfsNode);
+		return this._children;
+	}
+
 	getTreeItem(): vscode.TreeItem | Promise<vscode.TreeItem> {
 		throw new Error('Not intended for use in a file explorer view.');
 	}
 
 	getNodeInfo(): azdata.NodeInfo {
 		let nodeInfo: azdata.NodeInfo = {
-			label: localize('dataServicesLabel', 'Data Services'),
+			label: localize('dataServicesLabel', "Data Services"),
 			isLeaf: false,
 			errorMessage: undefined,
 			metadata: undefined,
@@ -352,4 +357,4 @@ class DataServicesNode extends TreeNode {
 		};
 		return nodeInfo;
 	}
-}
\ No newline at end of file
+}
@@ -78,8 +78,8 @@ export abstract class TreeNode implements ITreeNode {
 		return undefined;
 	}
 
-	public updateFileSource(connection: SqlClusterConnection): void {
-		this.fileSource = connection.createHdfsFileSource();
+	public async updateFileSource(connection: SqlClusterConnection): Promise<void> {
+		this.fileSource = await connection.createHdfsFileSource();
 	}
 	/**
 	 * The value to use for this node in the node path
@@ -482,15 +482,17 @@ export class WebHDFS {
 
 		let stream = undefined;
 		let canResume: boolean = true;
-		let params = Object.assign(
+		let params: any = Object.assign(
 			{
 				method: append ? 'POST' : 'PUT',
 				url: endpoint,
 				json: true,
-				headers: { 'content-type': 'application/octet-stream' }
 			},
 			this._requestParams
 		);
+		params.headers = params.headers || {};
+		params.headers['content-type'] = 'application/octet-stream';
+
 
 		let req = request(params, (error, response, body) => {
 			// Handle redirect only if there was not an error (e.g. res is defined)
@@ -147,7 +147,7 @@ export class SparkJobSubmissionModel {
 				return Promise.reject(LocalizedConstants.sparkJobSubmissionLocalFileNotExisted(localFilePath));
 			}
 
-			let fileSource: IFileSource = this._sqlClusterConnection.createHdfsFileSource();
+			let fileSource: IFileSource = await this._sqlClusterConnection.createHdfsFileSource();
 			await fileSource.writeFile(new File(localFilePath, false), hdfsFolderPath);
 		} catch (error) {
 			return Promise.reject(error);
@@ -160,7 +160,7 @@ export class SparkJobSubmissionModel {
 				return Promise.reject(localize('sparkJobSubmission_PathNotSpecified.', 'Property Path is not specified. '));
 			}
 
-			let fileSource: IFileSource = this._sqlClusterConnection.createHdfsFileSource();
+			let fileSource: IFileSource = await this._sqlClusterConnection.createHdfsFileSource();
 			return await fileSource.exists(path);
 		} catch (error) {
 			return Promise.reject(error);
@@ -11,6 +11,7 @@ const localize = nls.loadMessageBundle();
 import * as constants from '../../../constants';
 import { SqlClusterConnection } from '../../../objectExplorerNodeProvider/connection';
 import * as utils from '../../../utils';
+import * as auth from '../../../util/auth';
 
 export class SparkJobSubmissionService {
 	private _requestPromise: (args: any) => any;
@@ -28,6 +29,10 @@ export class SparkJobSubmissionService {
 	public async submitBatchJob(submissionArgs: SparkJobSubmissionInput): Promise<string> {
 		try {
 			let livyUrl: string = `https://${submissionArgs.host}:${submissionArgs.port}${submissionArgs.livyPath}/`;
+
+			// Get correct authentication headers
+			let headers = await this.getAuthenticationHeaders(submissionArgs);
+
 			let options = {
 				uri: livyUrl,
 				method: 'POST',
@@ -41,9 +46,7 @@ export class SparkJobSubmissionService {
 					name: submissionArgs.jobName
 				},
 				// authentication headers
-				headers: {
-					'Authorization': 'Basic ' + Buffer.from(submissionArgs.user + ':' + submissionArgs.password).toString('base64')
-				}
+				headers: headers
 			};
 
 			// Set arguments
@@ -90,18 +93,30 @@ export class SparkJobSubmissionService {
 		}
 	}
 
+	private async getAuthenticationHeaders(submissionArgs: SparkJobSubmissionInput) {
+		let headers = {};
+		if (submissionArgs.isIntegratedAuth) {
+			let kerberosToken = await auth.authenticateKerberos(submissionArgs.host);
+			headers = { Authorization: `Negotiate ${kerberosToken}` };
+		}
+		else {
+			headers = { Authorization: 'Basic ' + Buffer.from(submissionArgs.user + ':' + submissionArgs.password).toString('base64') };
+		}
+		return headers;
+	}
+
 	public async getYarnAppId(submissionArgs: SparkJobSubmissionInput, livyBatchId: string): Promise<LivyLogResponse> {
 		try {
 			let livyUrl = `https://${submissionArgs.host}:${submissionArgs.port}${submissionArgs.livyPath}/${livyBatchId}/log`;
+			let headers = await this.getAuthenticationHeaders(submissionArgs);
+
 			let options = {
 				uri: livyUrl,
 				method: 'GET',
 				json: true,
 				rejectUnauthorized: false,
 				// authentication headers
-				headers: {
-					'Authorization': 'Basic ' + Buffer.from(submissionArgs.user + ':' + submissionArgs.password).toString('base64')
-				}
+				headers: headers
 			};
 
 			const response = await this._requestPromise(options);
@@ -145,7 +160,8 @@ export class SparkJobSubmissionInput {
 		this._port = sqlClusterConnection.port;
 		this._livyPath = constants.mssqlClusterLivySubmitPath;
 		this._user = sqlClusterConnection.user;
-		this._passWord = sqlClusterConnection.password;
+		this._password = sqlClusterConnection.password;
+		this._isIntegratedAuth = sqlClusterConnection.isIntegratedAuth();
 	}
 
 	constructor(
@@ -160,7 +176,8 @@ export class SparkJobSubmissionInput {
 		private _port?: number,
 		private _livyPath?: string,
 		private _user?: string,
-		private _passWord?: string) {
+		private _password?: string,
+		private _isIntegratedAuth?: boolean) {
 	}
 
 	public get jobName(): string { return this._jobName; }
@@ -174,7 +191,8 @@ export class SparkJobSubmissionInput {
 	public get port(): number { return this._port; }
 	public get livyPath(): string { return this._livyPath; }
 	public get user(): string { return this._user; }
-	public get password(): string { return this._passWord; }
+	public get password(): string { return this._password; }
+	public get isIntegratedAuth(): boolean { return this._isIntegratedAuth; }
 }
 
 export enum SparkFileSource {
@@ -5,12 +5,12 @@
 
 import * as childProcess from 'child_process';
 import * as fs from 'fs-extra';
-import * as nls from 'vscode-nls';
 import * as path from 'path';
 import * as azdata from 'azdata';
 import * as vscode from 'vscode';
 import * as which from 'which';
 
 import * as constants from '../constants';
+import * as nls from 'vscode-nls';
 const localize = nls.loadMessageBundle();
 
 export function getDropdownValue(dropdownValue: string | azdata.CategoryValue): string {
@@ -23,8 +23,8 @@ export function getDropdownValue(dropdownValue: string | azdata.CategoryValue):
 
 export function getServerAddressFromName(connection: azdata.ConnectionInfo | string): string {
 	// Strip TDS port number from the server URI
-	if ((<azdata.ConnectionInfo>connection).options && (<azdata.ConnectionInfo>connection).options['host']) {
-		return (<azdata.ConnectionInfo>connection).options['host'].split(',')[0].split(':')[0];
+	if ((<azdata.ConnectionInfo>connection).options && (<azdata.ConnectionInfo>connection).options[constants.hostPropName]) {
+		return (<azdata.ConnectionInfo>connection).options[constants.hostPropName].split(',')[0].split(':')[0];
 	} else if ((<azdata.ConnectionInfo>connection).options && (<azdata.ConnectionInfo>connection).options['server']) {
 		return (<azdata.ConnectionInfo>connection).options['server'].split(',')[0].split(':')[0];
 	} else {
@@ -11,10 +11,9 @@ import * as UUID from 'vscode-languageclient/lib/utils/uuid';
 import { AppContext } from './appContext';
 import { SqlClusterConnection } from './objectExplorerNodeProvider/connection';
 import { ICommandObjectExplorerContext } from './objectExplorerNodeProvider/command';
-import { IEndpoint } from './utils';
+import { IEndpoint, getClusterEndpoints, getHostAndPortFromEndpoint } from './utils';
 import { MssqlObjectExplorerNodeProvider } from './objectExplorerNodeProvider/objectExplorerNodeProvider';
-
 
 export function findSqlClusterConnection(
 	obj: ICommandObjectExplorerContext | azdata.IConnectionProfile,
 	appContext: AppContext): SqlClusterConnection {
@@ -76,14 +75,10 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
 	let serverInfo = await azdata.connection.getServerInfo(connectionId);
 	if (!serverInfo || !serverInfo.options) { return undefined; }
 
-	let endpoints: IEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty];
+	let endpoints: IEndpoint[] = getClusterEndpoints(serverInfo);
 	if (!endpoints || endpoints.length === 0) { return undefined; }
 
-	let index = endpoints.findIndex(ep => {
-		let serviceName: string = ep.serviceName.toLowerCase();
-		return serviceName === constants.hadoopEndpointNameKnox.toLowerCase() ||
-			serviceName === constants.hadoopEndpointNameGateway.toLowerCase();
-	});
+	let index = endpoints.findIndex(ep => ep.serviceName.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
 	if (index < 0) { return undefined; }
 
 	let credentials = await azdata.connection.getCredentials(connectionId);
@@ -95,15 +90,27 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
 		options: {}
 	};
 
-	clusterConnInfo.options[constants.hostPropName] = endpoints[index].ipAddress;
-	clusterConnInfo.options[constants.knoxPortPropName] = endpoints[index].port;
-	clusterConnInfo.options[constants.userPropName] = 'root'; //should be the same user as sql master
-	clusterConnInfo.options[constants.passwordPropName] = credentials.password;
+	let hostAndIp = getHostAndPortFromEndpoint(endpoints[index].endpoint);
+	clusterConnInfo.options[constants.hostPropName] = hostAndIp.host;
+	// TODO should we default the port? Or just ignore later?
+	clusterConnInfo.options[constants.knoxPortPropName] = hostAndIp.port || constants.defaultKnoxPort;
+	let authType = clusterConnInfo.options[constants.authenticationTypePropName] = sqlConnInfo.options[constants.authenticationTypePropName];
+	if (authType && authType.toLowerCase() !== constants.integratedAuth) {
+		clusterConnInfo.options[constants.userPropName] = 'root'; //should be the same user as sql master
+		clusterConnInfo.options[constants.passwordPropName] = credentials.password;
+	} else {
+		// Hack: for now, we need to use gateway-0 for integrated auth
+		let sqlDnsName: string = sqlConnInfo.options['server'].split(',')[0];
+		let parts = sqlDnsName.split('.');
+		parts[0] = 'gateway-0';
+		clusterConnInfo.options[constants.hostPropName] = parts.join('.');
+	}
 	clusterConnInfo = connToConnectionParam(clusterConnInfo);
 
 	return clusterConnInfo;
 }
 
 function connProfileToConnectionParam(connectionProfile: azdata.IConnectionProfile): ConnectionParam {
 	let result = Object.assign(connectionProfile, { connectionId: connectionProfile.id });
 	return <ConnectionParam>result;
@@ -118,6 +125,7 @@ function connToConnectionParam(connection: azdata.connection.Connection): Connec
 			userName: options[constants.userPropName],
 			password: options[constants.passwordPropName],
 			id: connectionId,
+			authenticationType: options[constants.authenticationTypePropName]
 		}
 	);
 	return <ConnectionParam>result;
@@ -141,4 +149,4 @@ class ConnectionParam implements azdata.connection.Connection, azdata.IConnectio
 	public connectionId: string;
 
 	public options: { [name: string]: any; };
-}
\ No newline at end of file
+}
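Aside (not part of the commit): the "gateway-0" workaround above, isolated as a standalone sketch. The integrated-auth path derives the gateway host from the SQL master's DNS name by swapping the first label, because the endpoints list doesn't yet carry domain names. The 'contoso.local' name is a made-up example; the real value comes from the connection's 'server' option.

	function deriveGatewayHost(sqlServerOption: string): string {
		// e.g. 'master-0.contoso.local,31433' -> 'master-0.contoso.local'
		const sqlDnsName = sqlServerOption.split(',')[0];
		const parts = sqlDnsName.split('.');
		parts[0] = 'gateway-0';
		return parts.join('.'); // -> 'gateway-0.contoso.local'
	}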
extensions/mssql/src/util/auth.ts (new file, 14 lines added)
@@ -0,0 +1,14 @@
+/*---------------------------------------------------------------------------------------------
+ * Copyright (c) Microsoft Corporation. All rights reserved.
+ * Licensed under the Source EULA. See License.txt in the project root for license information.
+ *--------------------------------------------------------------------------------------------*/
+
+import * as kerberos from 'kerberos';
+
+export async function authenticateKerberos(hostname: string): Promise<string> {
+	const service = 'HTTP' + (process.platform === 'win32' ? '/' : '@') + hostname;
+	const mechOID = kerberos.GSS_MECH_OID_KRB5;
+	let client = await kerberos.initializeClient(service, { mechOID });
+	let response = await client.step('');
+	return response;
+}
@@ -232,36 +232,55 @@ export function getUserHome(): string {
 	return process.env.HOME || process.env.USERPROFILE;
 }
 
-export async function getClusterEndpoint(profileId: string, serviceName: string): Promise<IEndpoint> {
-	let serverInfo: azdata.ServerInfo = await azdata.connection.getServerInfo(profileId);
-	if (!serverInfo || !serverInfo.options) {
-		return undefined;
-	}
-	let endpoints: IEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty];
-	if (!endpoints || endpoints.length === 0) {
-		return undefined;
-	}
-	let index = endpoints.findIndex(ep => ep.serviceName === serviceName);
-	if (index === -1) {
-		return undefined;
-	}
-	let clusterEndpoint: IEndpoint = {
-		serviceName: endpoints[index].serviceName,
-		ipAddress: endpoints[index].ipAddress,
-		port: endpoints[index].port,
-		isHyperlink: false,
-		hyperlink: null
-	};
-	return clusterEndpoint;
+export function getClusterEndpoints(serverInfo: azdata.ServerInfo): IEndpoint[] | undefined {
+	let endpoints: RawEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty];
+	if (!endpoints || endpoints.length === 0) { return []; }
+
+	return endpoints.map(e => {
+		// If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS
+		let endpoint = e.endpoint ? e.endpoint : `https://${e.ipAddress}:${e.port}`;
+		let updatedEndpoint: IEndpoint = {
+			serviceName: e.serviceName,
+			description: e.description,
+			endpoint: endpoint,
+			protocol: e.protocol
+		};
+		return updatedEndpoint;
+	});
+}
+
+export type HostAndIp = { host: string, port: string };
+
+export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp {
+	let authority = vscode.Uri.parse(endpoint).authority;
+	let hostAndPortRegex = /^(.*)([,:](\d+))/g;
+	let match = hostAndPortRegex.exec(authority);
+	if (match) {
+		return {
+			host: match[1],
+			port: match[3]
+		};
+	}
+	return {
+		host: authority,
+		port: undefined
+	};
 }
 
+interface RawEndpoint {
+	serviceName: string;
+	description?: string;
+	endpoint?: string;
+	protocol?: string;
+	ipAddress?: string;
+	port?: number;
+}
+
 export interface IEndpoint {
 	serviceName: string;
-	ipAddress: string;
-	port: number;
-	isHyperlink: boolean;
-	hyperlink: string;
+	description: string;
+	endpoint: string;
+	protocol: string;
 }
 
 export function isValidNumber(maybeNumber: any) {
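Worked example (not part of the commit) of what getHostAndPortFromEndpoint above returns. vscode.Uri.parse('https://gateway-svc.contoso.local:30443').authority is 'gateway-svc.contoso.local:30443' (the host name is illustrative), and the regex then splits host and port:

	const hostAndPortRegex = /^(.*)([,:](\d+))/g;
	const match = hostAndPortRegex.exec('gateway-svc.contoso.local:30443');
	// match[1] === 'gateway-svc.contoso.local', match[3] === '30443'
	// For an authority with no port, exec() returns null and the function
	// falls back to { host: authority, port: undefined }.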