Allow non-admin BDC connections to see BDC features (#12663)
* Add handling for non-admin BDC users
* Bump STS
* Fix HDFS root node commands
* remove nested awaits
* colon
extensions/big-data-cluster/src/bdc.d.ts (vendored, 20 lines changed)
@@ -8,10 +8,30 @@ declare module 'bdc' {
		getClusterController(url: string, authType: AuthType, username?: string, password?: string): IClusterController;
	}

	export interface IEndpointModel {
		name?: string;
		description?: string;
		endpoint?: string;
		protocol?: string;
	}

	export interface IHttpResponse {
		method?: string;
		url?: string;
		statusCode?: number;
		statusMessage?: string;
	}

	export interface IEndPointsResponse {
		response: IHttpResponse;
		endPoints: IEndpointModel[];
	}

	export type AuthType = 'integrated' | 'basic';

	export interface IClusterController {
		getClusterConfig(): Promise<any>;
		getKnoxUsername(clusterUsername: string): Promise<string>;
		getEndPoints(promptConnect?: boolean): Promise<IEndPointsResponse>
	}
}
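These typings make the endpoint and controller shapes part of the shared `bdc` module, so other extensions consume one contract instead of keeping duplicate local interfaces. A minimal consumer sketch, assuming only the API surface declared above (`bdc.IExtension` and `bdc.constants.extensionName` appear elsewhere in this diff; the `'gateway'` filter value is illustrative):

```typescript
import * as vscode from 'vscode';
import * as bdc from 'bdc';

async function listGatewayEndpoints(url: string, user: string, password: string): Promise<bdc.IEndpointModel[]> {
	// Activate the big-data-cluster extension and grab its exported API
	const api = <bdc.IExtension>await vscode.extensions.getExtension(bdc.constants.extensionName).activate();
	const controller = api.getClusterController(url, 'basic', user, password);
	const result: bdc.IEndPointsResponse = await controller.getEndPoints();
	// Filter on the shared 'name' field (formerly 'serviceName' in local types)
	return result.endPoints.filter(e => e.name?.toLowerCase() === 'gateway');
}
```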
@@ -10,6 +10,7 @@

import localVarRequest = require('request');
import http = require('http');
+import * as bdc from 'bdc';

let defaultBasePath = 'https://localhost';

@@ -203,7 +204,7 @@ export class Dashboards {
	}
}

-export class EndpointModel {
+export class EndpointModel implements bdc.IEndpointModel {
	'name'?: string;
	'description'?: string;
	'endpoint'?: string;
@@ -10,7 +10,7 @@ import { TokenRouterApi } from './clusterApiGenerated2';
import * as nls from 'vscode-nls';
import { ConnectControllerDialog, ConnectControllerModel } from '../dialog/connectControllerDialog';
import { getIgnoreSslVerificationConfigSetting } from '../utils';
-import { IClusterController, AuthType } from 'bdc';
+import { IClusterController, AuthType, IEndPointsResponse, IHttpResponse } from 'bdc';

const localize = nls.loadMessageBundle();

@@ -174,24 +174,17 @@ export class ClusterController implements IClusterController {
	}

	public async getKnoxUsername(sqlLogin: string): Promise<string> {
-		try {
-			// This all is necessary because prior to CU5 BDC deployments all had the same default username for
-			// accessing the Knox gateway. But in CU5 the allowRunAsRoot setting was added and defaulted to false - so
-			// if that exists and is false then we use the username instead.
-			// Note that the SQL username may not necessarily be correct here either - but currently this is what
-			// we're requiring to run Notebooks in a BDC
-			const config = await this.getClusterConfig();
-			return config.spec?.spec?.security?.allowRunAsRoot === false ? sqlLogin : DEFAULT_KNOX_USERNAME;
-		} catch (err) {
-			console.log(`Unexpected error fetching cluster config for getKnoxUsername ${err}`);
-			// Optimistically fall back to SQL login since root shouldn't be typically used going forward
-			return sqlLogin;
-		}
+		// This all is necessary because prior to CU5 BDC deployments all had the same default username for
+		// accessing the Knox gateway. But in CU5 the allowRunAsRoot setting was added and defaulted to false - so
+		// if that exists and is false then we use the username instead.
+		// Note that the SQL username may not necessarily be correct here either - but currently this is what
+		// we're requiring to run Notebooks in a BDC
+		const config = await this.getClusterConfig();
+		return config.spec?.spec?.security?.allowRunAsRoot === false ? sqlLogin : DEFAULT_KNOX_USERNAME;
	}

	public async getClusterConfig(promptConnect: boolean = false): Promise<any> {
-		return await this.withConnectRetry<IEndPointsResponse>(
+		return await this.withConnectRetry<any>(
			this.getClusterConfigImpl,
			promptConnect,
			localize('bdc.error.getClusterConfig', "Error retrieving cluster config from {0}", this._url));

@@ -387,11 +380,6 @@ export interface IClusterRequest {
	method?: string;
}

-export interface IEndPointsResponse {
-	response: IHttpResponse;
-	endPoints: EndpointModel[];
-}
-
export interface IBdcStatusResponse {
	response: IHttpResponse;
	bdcStatus: BdcStatusModel;

@@ -419,13 +407,6 @@ export interface MountStatusResponse {
	mount: MountInfo[];
}

-export interface IHttpResponse {
-	method?: string;
-	url?: string;
-	statusCode?: number;
-	statusMessage?: string;
-}
-
export class ControllerError extends Error {
	public code?: number;
	public reason?: string;
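The getKnoxUsername change above drops the method's internal try/catch, so callers now decide how to fall back; the jupyterSession hunk near the end of this diff does exactly that. A hedged sketch of the caller-side pattern (resolveKnoxUser is an illustrative name, not code from this PR):

```typescript
import { IClusterController } from 'bdc';

async function resolveKnoxUser(controller: IClusterController, sqlLogin: string): Promise<string> {
	try {
		// Post-CU5 clusters (allowRunAsRoot === false) use the SQL login; older
		// clusters still use the shared default gateway user.
		return await controller.getKnoxUsername(sqlLogin);
	} catch (err) {
		// A non-admin user may not be able to read the cluster config at all;
		// optimistically fall back to the SQL login.
		console.log(`Unexpected error getting Knox username: ${err}`);
		return sqlLogin;
	}
}
```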
@@ -4,10 +4,10 @@
 *--------------------------------------------------------------------------------------------*/

import * as azdata from 'azdata';
-import { ClusterController, ControllerError, IEndPointsResponse } from '../controller/clusterControllerApi';
+import { ClusterController, ControllerError } from '../controller/clusterControllerApi';
import { Deferred } from '../../common/promise';
import * as loc from '../localizedConstants';
-import { AuthType } from 'bdc';
+import { AuthType, IEndPointsResponse } from 'bdc';

function getAuthCategory(name: AuthType): azdata.CategoryValue {
	if (name === 'basic') {
@@ -1,6 +1,6 @@
{
	"downloadUrl": "https://github.com/Microsoft/sqltoolsservice/releases/download/v{#version#}/microsoft.sqltools.servicelayer-{#fileName#}",
-	"version": "3.0.0-release.35",
+	"version": "3.0.0-release.37",
	"downloadFileNames": {
		"Windows_86": "win-x86-netcoreapp3.1.zip",
		"Windows_64": "win-x64-netcoreapp3.1.zip",
@@ -5,6 +5,7 @@

import * as vscode from 'vscode';
import * as azdata from 'azdata';
+import * as bdc from 'bdc';
import * as nls from 'vscode-nls';
const localize = nls.loadMessageBundle();

@@ -23,15 +24,15 @@ const hyperlinkedEndpoints = [grafanaEndpointName, logsuiEndpointName, sparkHist

export function registerServiceEndpoints(context: vscode.ExtensionContext): void {
	azdata.ui.registerModelViewProvider('bdc-endpoints', async (view) => {
-		let endpointsArray: Array<utils.IEndpoint> = Object.assign([], utils.getClusterEndpoints(view.serverInfo));
+		let endpointsArray: Array<bdc.IEndpointModel> = Object.assign([], utils.getClusterEndpoints(view.serverInfo));

		if (endpointsArray.length > 0) {
-			const grafanaEp = endpointsArray.find(e => e.serviceName === grafanaEndpointName);
+			const grafanaEp = endpointsArray.find(e => e.name === grafanaEndpointName);
			if (grafanaEp && grafanaEp.endpoint && grafanaEp.endpoint.indexOf('/d/wZx3OUdmz') === -1) {
				// Update to have correct URL
				grafanaEp.endpoint += '/d/wZx3OUdmz';
			}
-			const kibanaEp = endpointsArray.find(e => e.serviceName === logsuiEndpointName);
+			const kibanaEp = endpointsArray.find(e => e.name === logsuiEndpointName);
			if (kibanaEp && kibanaEp.endpoint && kibanaEp.endpoint.indexOf('/app/kibana#/discover') === -1) {
				// Update to have correct URL
				kibanaEp.endpoint += '/app/kibana#/discover';

@@ -40,13 +41,13 @@ export function registerServiceEndpoints(context: vscode.ExtensionContext): void
			if (!grafanaEp) {
				// We are on older CTP, need to manually add some endpoints.
				// TODO remove once CTP support goes away
-				const managementProxyEp = endpointsArray.find(e => e.serviceName === mgmtProxyName);
+				const managementProxyEp = endpointsArray.find(e => e.name === mgmtProxyName);
				if (managementProxyEp) {
					endpointsArray.push(getCustomEndpoint(managementProxyEp, grafanaEndpointName, grafanaDescription, '/grafana/d/wZx3OUdmz'));
					endpointsArray.push(getCustomEndpoint(managementProxyEp, logsuiEndpointName, logsuiDescription, '/kibana/app/kibana#/discover'));
				}

-				const gatewayEp = endpointsArray.find(e => e.serviceName === 'gateway');
+				const gatewayEp = endpointsArray.find(e => e.name === 'gateway');
				if (gatewayEp) {
					endpointsArray.push(getCustomEndpoint(gatewayEp, sparkHistoryEndpointName, sparkHistoryDescription, '/gateway/default/sparkhistory'));
					endpointsArray.push(getCustomEndpoint(gatewayEp, yarnUiEndpointName, yarnHistoryDescription, '/gateway/default/yarn'));

@@ -54,14 +55,14 @@ export function registerServiceEndpoints(context: vscode.ExtensionContext): void
			}

			endpointsArray = endpointsArray.map(e => {
-				e.description = getEndpointDisplayText(e.serviceName, e.description);
+				e.description = getEndpointDisplayText(e.name, e.description);
				return e;
			});

			// Sort the endpoints. The sort method is that SQL Server Master is first - followed by all
			// others in alphabetical order by endpoint
-			const sqlServerMasterEndpoints = endpointsArray.filter(e => e.serviceName === Endpoint.sqlServerMaster);
-			endpointsArray = endpointsArray.filter(e => e.serviceName !== Endpoint.sqlServerMaster)
+			const sqlServerMasterEndpoints = endpointsArray.filter(e => e.name === Endpoint.sqlServerMaster);
+			endpointsArray = endpointsArray.filter(e => e.name !== Endpoint.sqlServerMaster)
				.sort((e1, e2) => e1.endpoint.localeCompare(e2.endpoint));
			endpointsArray.unshift(...sqlServerMasterEndpoints);

@@ -70,7 +71,7 @@ export function registerServiceEndpoints(context: vscode.ExtensionContext): void
				const endPointRow = view.modelBuilder.flexContainer().withLayout({ flexFlow: 'row' }).component();
				const nameCell = view.modelBuilder.text().withProperties<azdata.TextComponentProperties>({ value: endpointInfo.description }).component();
				endPointRow.addItem(nameCell, { CSSStyles: { 'width': '35%', 'font-weight': '600', 'user-select': 'text' } });
-				if (hyperlinkedEndpoints.findIndex(e => e === endpointInfo.serviceName) >= 0) {
+				if (hyperlinkedEndpoints.findIndex(e => e === endpointInfo.name) >= 0) {
					const linkCell = view.modelBuilder.hyperlink()
						.withProperties<azdata.HyperlinkComponentProperties>({
							label: endpointInfo.endpoint,

@@ -111,10 +112,10 @@ export function registerServiceEndpoints(context: vscode.ExtensionContext): void
	});
}

-function getCustomEndpoint(parentEndpoint: utils.IEndpoint, serviceName: string, description: string, serviceUrl?: string): utils.IEndpoint {
+function getCustomEndpoint(parentEndpoint: bdc.IEndpointModel, serviceName: string, description: string, serviceUrl?: string): bdc.IEndpointModel {
	if (parentEndpoint) {
-		let endpoint: utils.IEndpoint = {
-			serviceName: serviceName,
+		let endpoint: bdc.IEndpointModel = {
+			name: serviceName,
			description: description,
			endpoint: parentEndpoint.endpoint + serviceUrl,
			protocol: 'https'
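The serviceName-to-name rename above tracks bdc.IEndpointModel, and getCustomEndpoint synthesizes endpoints that older CTP clusters don't report by appending a known route to a parent proxy endpoint. That derivation in isolation (a sketch; deriveEndpoint is an illustrative name):

```typescript
import * as bdc from 'bdc';

function deriveEndpoint(parent: bdc.IEndpointModel, name: string, description: string, route: string): bdc.IEndpointModel {
	return {
		name: name,
		description: description,
		// The child UI lives under a known path on the parent proxy
		endpoint: parent.endpoint + route,
		protocol: 'https'
	};
}

// e.g. the Spark history UI derived from the Knox gateway endpoint:
// deriveEndpoint(gatewayEp, sparkHistoryEndpointName, sparkHistoryDescription, '/gateway/default/sparkhistory');
```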
@@ -221,7 +221,7 @@ async function handleOpenNotebookTask(profile: azdata.IConnectionProfile): Promi

async function handleOpenClusterDashboardTask(profile: azdata.IConnectionProfile, appContext: AppContext): Promise<void> {
	const serverInfo = await azdata.connection.getServerInfo(profile.id);
-	const controller = Utils.getClusterEndpoints(serverInfo).find(e => e.serviceName === Endpoint.controller);
+	const controller = Utils.getClusterEndpoints(serverInfo).find(e => e.name === Endpoint.controller);
	if (!controller) {
		vscode.window.showErrorMessage(localize('noController', "Could not find the controller endpoint for this instance"));
		return;

@@ -406,7 +406,7 @@ export class ManageAccessCommand extends Command {
		try {
			let node = await getNode<HdfsFileSourceNode>(context, this.appContext);
			if (node) {
-				new ManageAccessDialog(node.hdfsPath, node.fileSource).openDialog();
+				new ManageAccessDialog(node.hdfsPath, await node.getFileSource()).openDialog();
			} else {
				vscode.window.showErrorMessage(LocalizedConstants.msgMissingNodeContext);
			}
@@ -17,6 +17,7 @@ import { TreeNode } from './treeNodes';
import * as utils from '../utils';
import { IFileNode } from './types';
import { MountStatus } from '../hdfs/mount';
+import { SqlClusterSession } from './objectExplorerNodeProvider';

export interface ITreeChangeHandler {
	notifyNodeChanged(node: TreeNode): void;

@@ -29,8 +30,8 @@ export class TreeDataContext {
}

export abstract class HdfsFileSourceNode extends TreeNode {
-	constructor(protected context: TreeDataContext, protected _path: string, public readonly fileSource: IFileSource, protected mountStatus?: MountStatus) {
-		super();
+	constructor(protected context: TreeDataContext, protected _path: string, fileSource: IFileSource | undefined, protected mountStatus?: MountStatus) {
+		super(fileSource);
	}

	public get hdfsPath(): string {

@@ -51,7 +52,8 @@ export abstract class HdfsFileSourceNode extends TreeNode {
	}

	public async delete(recursive: boolean = false): Promise<void> {
-		await this.fileSource.delete(this.hdfsPath, recursive);
+		const fileSource = await this.getFileSource();
+		await fileSource.delete(this.hdfsPath, recursive);
		// Notify parent should be updated. If at top, will return undefined which will refresh whole tree
		(<HdfsFileSourceNode>this.parent).onChildRemoved();
		this.context.changeHandler.notifyNodeChanged(this.parent);

@@ -60,34 +62,28 @@ export abstract class HdfsFileSourceNode extends TreeNode {
}

export class FolderNode extends HdfsFileSourceNode {
-	private children: TreeNode[];
+	private children: TreeNode[] = [];
	protected _nodeType: string;
-	constructor(context: TreeDataContext, path: string, fileSource: IFileSource, nodeType?: string, mountStatus?: MountStatus) {
+	constructor(context: TreeDataContext, path: string, fileSource: IFileSource | undefined, nodeType?: string, mountStatus?: MountStatus) {
		super(context, path, fileSource, mountStatus);
		this._nodeType = nodeType ? nodeType : Constants.MssqlClusterItems.Folder;
	}

-	private ensureChildrenExist(): void {
-		if (!this.children) {
-			this.children = [];
-		}
-	}
-
	public onChildRemoved(): void {
		this.children = undefined;
	}

	async getChildren(refreshChildren: boolean): Promise<TreeNode[]> {
		if (refreshChildren || !this.children) {
-			this.ensureChildrenExist();
			try {
-				let files: IFile[] = await this.fileSource.enumerateFiles(this._path);
+				const fileSource = await this.getFileSource();
+				let files: IFile[] = await fileSource.enumerateFiles(this._path);
				if (files) {
					// Note: for now, assuming HDFS-provided sorting is sufficient
					this.children = files.map((file) => {
						let node: TreeNode = file.fileType === FileType.File ?
-							new FileNode(this.context, file.path, this.fileSource, this.getChildMountStatus(file)) :
-							new FolderNode(this.context, file.path, this.fileSource, Constants.MssqlClusterItems.Folder, this.getChildMountStatus(file));
+							new FileNode(this.context, file.path, fileSource, this.getChildMountStatus(file)) :
+							new FolderNode(this.context, file.path, fileSource, Constants.MssqlClusterItems.Folder, this.getChildMountStatus(file));
						node.parent = this;
						return node;
					});

@@ -153,8 +149,9 @@ export class FolderNode extends HdfsFileSourceNode {
	}

	private async writeFileAsync(localFile: IFile): Promise<FileNode> {
-		await this.fileSource.writeFile(localFile, this._path);
-		let fileNode = new FileNode(this.context, File.createPath(this._path, File.getBasename(localFile)), this.fileSource);
+		const fileSource = await this.getFileSource();
+		await fileSource.writeFile(localFile, this._path);
+		let fileNode = new FileNode(this.context, File.createPath(this._path, File.getBasename(localFile)), fileSource);
		return fileNode;
	}

@@ -163,8 +160,9 @@ export class FolderNode extends HdfsFileSourceNode {
	}

	private async mkdirAsync(name: string): Promise<FolderNode> {
-		await this.fileSource.mkdir(name, this._path);
-		let subDir = new FolderNode(this.context, File.createPath(this._path, name), this.fileSource);
+		const fileSource = await this.getFileSource();
+		await fileSource.mkdir(name, this._path);
+		let subDir = new FolderNode(this.context, File.createPath(this._path, name), fileSource);
		return subDir;
	}

@@ -186,8 +184,8 @@ export class FolderNode extends HdfsFileSourceNode {

export class ConnectionNode extends FolderNode {

-	constructor(context: TreeDataContext, private displayName: string, fileSource: IFileSource) {
-		super(context, '/', fileSource, Constants.MssqlClusterItems.Connection);
+	constructor(context: TreeDataContext, private displayName: string, private clusterSession: SqlClusterSession) {
+		super(context, '/', undefined, Constants.MssqlClusterItems.Connection);
	}

	getDisplayName(): string {

@@ -204,6 +202,16 @@ export class ConnectionNode extends FolderNode {
		return item;
	}

+	public async getFileSource(): Promise<IFileSource | undefined> {
+		// The node is initially created without a filesource and then one is created only once an action is
+		// taken that requires a connection
+		const fileSource = await super.getFileSource();
+		if (!fileSource) {
+			await this.updateFileSource(await this.clusterSession.getSqlClusterConnection());
+		}
+		return super.getFileSource();
+	}
+
	getNodeInfo(): azdata.NodeInfo {
		// TODO handle error message case by returning it in the OE API
		// TODO support better mapping of node type
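ConnectionNode's new getFileSource above is what lets the HDFS tree node exist for a non-admin user before any gateway credentials are known: the IFileSource is only built on first use. The same lazy-initialization shape in isolation (a sketch; LazyResource and makeConnection are illustrative names, not extension code):

```typescript
class LazyResource<T> {
	private value: T | undefined;

	constructor(private readonly factory: () => Promise<T>) { }

	public async get(): Promise<T> {
		if (!this.value) {
			// Created on first use, then reused by every later caller
			this.value = await this.factory();
		}
		return this.value;
	}
}

// Illustrative stand-in for getSqlClusterConnection() + createHdfsFileSource()
declare function makeConnection(): Promise<{ createHdfsFileSource(): Promise<object> }>;

const lazyFileSource = new LazyResource(async () => (await makeConnection()).createHdfsFileSource());
```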
@@ -264,18 +272,21 @@ export class FileNode extends HdfsFileSourceNode implements IFileNode {
	}

	public async getFileContentsAsString(maxBytes?: number): Promise<string> {
-		let contents: Buffer = await this.fileSource.readFile(this.hdfsPath, maxBytes);
+		const fileSource = await this.getFileSource();
+		let contents: Buffer = await fileSource.readFile(this.hdfsPath, maxBytes);
		return contents ? contents.toString('utf8') : '';
	}

	public async getFileLinesAsString(maxLines: number): Promise<string> {
-		let contents: Buffer = await this.fileSource.readFileLines(this.hdfsPath, maxLines);
+		const fileSource = await this.getFileSource();
+		let contents: Buffer = await fileSource.readFileLines(this.hdfsPath, maxLines);
		return contents ? contents.toString('utf8') : '';
	}

-	public writeFileContentsToDisk(localPath: string, cancelToken?: vscode.CancellationTokenSource): Promise<vscode.Uri> {
+	public async writeFileContentsToDisk(localPath: string, cancelToken?: vscode.CancellationTokenSource): Promise<vscode.Uri> {
+		const fileSource = await this.getFileSource();
		return new Promise((resolve, reject) => {
-			let readStream: fs.ReadStream = this.fileSource.createReadStream(this.hdfsPath);
+			let readStream: fs.ReadStream = fileSource.createReadStream(this.hdfsPath);
			readStream.on('error', (err) => {
				reject(err);
			});

@@ -320,7 +331,7 @@ class ErrorNode extends TreeNode {

	private _nodePathValue: string;
	constructor(private message: string) {
-		super();
+		super(undefined);
	}

	public static create(message: string, parent: TreeNode, errorCode?: number): ErrorNode {
@@ -13,23 +13,22 @@ import { SqlClusterConnection } from './connection';
import * as utils from '../utils';
import { TreeNode } from './treeNodes';
import { ConnectionNode, TreeDataContext, ITreeChangeHandler } from './hdfsProvider';
-import { IFileSource } from './fileSources';
import { AppContext } from '../appContext';
import * as constants from '../constants';
import * as SqlClusterLookUp from '../sqlClusterLookUp';
import { ICommandObjectExplorerContext } from './command';
import { IPrompter, IQuestion, QuestionTypes } from '../prompts/question';
+import { getSqlClusterConnectionParams } from '../sqlClusterLookUp';

export const mssqlOutputChannel = vscode.window.createOutputChannel(constants.providerId);

export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azdata.ObjectExplorerNodeProvider, ITreeChangeHandler {
	public readonly supportedProviderId: string = constants.providerId;
-	private sessionMap: Map<string, SqlClusterSession>;
+	private clusterSessionMap: Map<string, SqlClusterSession>;
	private expandCompleteEmitter = new vscode.EventEmitter<azdata.ObjectExplorerExpandInfo>();

	constructor(private prompter: IPrompter, private appContext: AppContext) {
		super();
-		this.sessionMap = new Map<string, SqlClusterSession>();
+		this.clusterSessionMap = new Map<string, SqlClusterSession>();
		this.appContext.registerService<MssqlObjectExplorerNodeProvider>(constants.ObjectExplorerService, this);
	}
@@ -49,12 +48,8 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
		let sqlConnProfile = await azdata.objectexplorer.getSessionConnectionProfile(session.sessionId);
		if (!sqlConnProfile) { return false; }

-		let clusterConnInfo = await SqlClusterLookUp.getSqlClusterConnection(sqlConnProfile);
-		if (!clusterConnInfo) { return false; }
-
-		let clusterConnection = new SqlClusterConnection(clusterConnInfo);
-		let clusterSession = new SqlClusterSession(clusterConnection, session, sqlConnProfile, this.appContext, this);
-		this.sessionMap.set(session.sessionId, clusterSession);
+		let clusterSession = new SqlClusterSession(session, sqlConnProfile, this.appContext, this);
+		this.clusterSessionMap.set(session.sessionId, clusterSession);
		return true;
	}

@@ -69,7 +64,7 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
	}

	private async doExpandNode(nodeInfo: azdata.ExpandNodeInfo, isRefresh: boolean = false): Promise<boolean> {
-		let session = this.sessionMap.get(nodeInfo.sessionId);
+		let session = this.clusterSessionMap.get(nodeInfo.sessionId);
		let response: azdata.ObjectExplorerExpandInfo = {
			sessionId: nodeInfo.sessionId,
			nodePath: nodeInfo.nodePath,

@@ -117,20 +112,31 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
			// Only child returned when failure happens : When failed with 'Unauthorized' error, prompt for password.
			if (children.length === 1 && this.hasExpansionError(children)) {
				if (children[0].errorStatusCode === 401) {
+					const sqlClusterConnection = await session.getSqlClusterConnection();
					// First prompt for username (defaulting to existing username)
-					let username: string = await this.promptInput(localize('promptUsername', "Please provide the username to connect to HDFS:"), session.sqlClusterConnection.user);
+					let username = await this.prompter.promptSingle<string>(<IQuestion>{
+						type: QuestionTypes.input,
+						name: 'inputPrompt',
+						message: localize('promptUsername', "Please provide the username to connect to HDFS:"),
+						default: sqlClusterConnection.user
+					});
					// Only update the username if it's different than the original (the update functions ignore falsy values)
-					if (username === session.sqlClusterConnection.user) {
+					if (username === sqlClusterConnection.user) {
						username = '';
					}
-					session.sqlClusterConnection.updateUsername(username);
+					sqlClusterConnection.updateUsername(username);

					// And then prompt for password
-					const password: string = await this.promptPassword(localize('prmptPwd', "Please provide the password to connect to HDFS:"));
-					session.sqlClusterConnection.updatePassword(password);
+					const password = await this.prompter.promptSingle<string>(<IQuestion>{
+						type: QuestionTypes.password,
+						name: 'passwordPrompt',
+						message: localize('prmptPwd', "Please provide the password to connect to HDFS:"),
+						default: ''
+					});
+					sqlClusterConnection.updatePassword(password);

					if (username || password) {
-						await node.updateFileSource(session.sqlClusterConnection);
+						await node.updateFileSource(sqlClusterConnection);
						children = await node.getChildren(true);
					}
				}
@@ -150,31 +156,13 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
		this.expandCompleteEmitter.fire(expandResult);
	}

-	private async promptInput(promptMsg: string, defaultValue: string): Promise<string> {
-		return await this.prompter.promptSingle(<IQuestion>{
-			type: QuestionTypes.input,
-			name: 'inputPrompt',
-			message: promptMsg,
-			default: defaultValue
-		}).then(confirmed => <string>confirmed);
-	}
-
-	private async promptPassword(promptMsg: string): Promise<string> {
-		return await this.prompter.promptSingle(<IQuestion>{
-			type: QuestionTypes.password,
-			name: 'passwordPrompt',
-			message: promptMsg,
-			default: ''
-		}).then(confirmed => <string>confirmed);
-	}
-
	refreshNode(nodeInfo: azdata.ExpandNodeInfo): Thenable<boolean> {
		// TODO #3815 implement properly
		return this.expandNode(nodeInfo, true);
	}

	handleSessionClose(closeSessionInfo: azdata.ObjectExplorerCloseSessionInfo): void {
-		this.sessionMap.delete(closeSessionInfo.sessionId);
+		this.clusterSessionMap.delete(closeSessionInfo.sessionId);
	}

	findNodes(findNodesInfo: azdata.FindNodesInfo): Thenable<azdata.ObjectExplorerFindNodesResponse> {

@@ -242,7 +230,7 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
	}

	public findSqlClusterSessionBySqlConnProfile(connectionProfile: azdata.IConnectionProfile): SqlClusterSession {
-		for (let session of this.sessionMap.values()) {
+		for (let session of this.clusterSessionMap.values()) {
			if (session.isMatchedSqlConnection(connectionProfile)) {
				return session;
			}

@@ -251,11 +239,10 @@ export class MssqlObjectExplorerNodeProvider extends ProviderBase implements azd
	}
}

-class SqlClusterSession {
+export class SqlClusterSession {
	private _rootNode: SqlClusterRootNode;
+	private _sqlClusterConnection: SqlClusterConnection | undefined = undefined;

	constructor(
-		private _sqlClusterConnection: SqlClusterConnection,
		private _sqlSession: azdata.ObjectExplorerSession,
		private _sqlConnectionProfile: azdata.IConnectionProfile,
		private _appContext: AppContext,

@@ -266,7 +253,13 @@ class SqlClusterSession {
			this._sqlSession.rootNode.nodePath);
	}

-	public get sqlClusterConnection(): SqlClusterConnection { return this._sqlClusterConnection; }
+	public async getSqlClusterConnection(): Promise<SqlClusterConnection> {
+		if (!this._sqlClusterConnection) {
+			const sqlClusterConnectionParams = await getSqlClusterConnectionParams(this._sqlConnectionProfile);
+			this._sqlClusterConnection = new SqlClusterConnection(sqlClusterConnectionParams);
+		}
+		return this._sqlClusterConnection;
+	}
	public get sqlSession(): azdata.ObjectExplorerSession { return this._sqlSession; }
	public get sqlConnectionProfile(): azdata.IConnectionProfile { return this._sqlConnectionProfile; }
	public get sessionId(): string { return this._sqlSession.sessionId; }
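getSqlClusterConnection above caches the resolved SqlClusterConnection on the session. One design note, shown as a sketch with illustrative names: caching the Promise rather than the resolved value also collapses two concurrent first calls into a single connection attempt, since both awaiters share the same in-flight Promise.

```typescript
interface Connection { user: string; }
declare function connect(): Promise<Connection>;

class Session {
	private connPromise: Promise<Connection> | undefined;

	public getConnection(): Promise<Connection> {
		if (!this.connPromise) {
			this.connPromise = connect(); // started once, shared by all awaiters
		}
		return this.connPromise;
	}
}
```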
@@ -284,7 +277,7 @@ class SqlClusterRootNode extends TreeNode {
		private _treeDataContext: TreeDataContext,
		private _nodePathValue: string
	) {
-		super();
+		super(undefined);
	}

	public get session(): SqlClusterSession {

@@ -304,8 +297,8 @@ class SqlClusterRootNode extends TreeNode {

	private async refreshChildren(): Promise<TreeNode[]> {
		this._children = [];
-		let fileSource: IFileSource = await this.session.sqlClusterConnection.createHdfsFileSource();
-		let hdfsNode = new ConnectionNode(this._treeDataContext, localize('hdfsFolder', "HDFS"), fileSource);
+		let hdfsNode = new ConnectionNode(this._treeDataContext, localize('hdfsFolder', "HDFS"), this.session);
		hdfsNode.parent = this;
		this._children.push(hdfsNode);
		return this._children;
@@ -13,9 +13,10 @@ type TreeNodePredicate = (node: TreeNode) => boolean;

export abstract class TreeNode implements ITreeNode {
	private _parent: TreeNode = undefined;
-	protected fileSource: IFileSource;
	private _errorStatusCode: number;

+	constructor(private _fileSource: IFileSource | undefined) { }
+
	public get parent(): TreeNode {
		return this._parent;
	}

@@ -77,8 +78,13 @@ export abstract class TreeNode implements ITreeNode {
	}

	public async updateFileSource(connection: SqlClusterConnection): Promise<void> {
-		this.fileSource = await connection.createHdfsFileSource();
+		this._fileSource = await connection.createHdfsFileSource();
	}

+	public async getFileSource(): Promise<IFileSource | undefined> {
+		return this._fileSource;
+	}
+
	/**
	 * The value to use for this node in the node path
	 */
@@ -38,7 +38,7 @@ export class OpenSparkJobSubmissionDialogCommand extends Command {
		try {
			let sqlClusterConnection: SqlClusterConnection = undefined;
			if (context.type === constants.ObjectExplorerService) {
-				sqlClusterConnection = SqlClusterLookUp.findSqlClusterConnection(context, this.appContext);
+				sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(context, this.appContext);
			}
			if (!sqlClusterConnection) {
				sqlClusterConnection = await this.selectConnection();

@@ -103,7 +103,7 @@ export class OpenSparkJobSubmissionDialogCommand extends Command {
		let sqlConnection = connectionMap.get(selectedHost);
		if (!sqlConnection) { throw new Error(errorMsg); }

-		let sqlClusterConnection = await SqlClusterLookUp.getSqlClusterConnection(sqlConnection);
+		let sqlClusterConnection = await SqlClusterLookUp.getSqlClusterConnectionParams(sqlConnection);
		if (!sqlClusterConnection) {
			throw new Error(localize('errorNotSqlBigDataCluster', "The selected server does not belong to a SQL Server Big Data Cluster"));
		}

@@ -159,7 +159,7 @@ export class OpenSparkJobSubmissionDialogTask {

	async execute(profile: azdata.IConnectionProfile, ...args: any[]): Promise<void> {
		try {
-			let sqlClusterConnection = SqlClusterLookUp.findSqlClusterConnection(profile, this.appContext);
+			let sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(profile, this.appContext);
			if (!sqlClusterConnection) {
				throw new Error(LocalizedConstants.sparkJobSubmissionNoSqlBigDataClusterFound);
			}

@@ -16,7 +16,7 @@ export class OpenSparkYarnHistoryTask {

	async execute(sqlConnProfile: azdata.IConnectionProfile, isSpark: boolean): Promise<void> {
		try {
-			let sqlClusterConnection = SqlClusterLookUp.findSqlClusterConnection(sqlConnProfile, this.appContext);
+			let sqlClusterConnection = await SqlClusterLookUp.findSqlClusterConnection(sqlConnProfile, this.appContext);
			if (!sqlClusterConnection) {
				let name = isSpark ? 'Spark' : 'Yarn';
				vscode.window.showErrorMessage(loc.sparkConnectionRequired(name));
@@ -11,12 +11,17 @@ import * as UUID from 'vscode-languageclient/lib/utils/uuid';
import { AppContext } from './appContext';
import { SqlClusterConnection } from './objectExplorerNodeProvider/connection';
import { ICommandObjectExplorerContext } from './objectExplorerNodeProvider/command';
-import { IEndpoint, getClusterEndpoints, getHostAndPortFromEndpoint } from './utils';
+import { getClusterEndpoints, getHostAndPortFromEndpoint } from './utils';
import { MssqlObjectExplorerNodeProvider } from './objectExplorerNodeProvider/objectExplorerNodeProvider';
+import CodeAdapter from './prompts/adapter';
+import { IQuestion, QuestionTypes } from './prompts/question';
+import * as nls from 'vscode-nls';
+import { AuthType } from './util/auth';
+const localize = nls.loadMessageBundle();

-export function findSqlClusterConnection(
+export async function findSqlClusterConnection(
	obj: ICommandObjectExplorerContext | azdata.IConnectionProfile,
-	appContext: AppContext): SqlClusterConnection {
+	appContext: AppContext): Promise<SqlClusterConnection> {

	if (!obj || !appContext) { return undefined; }

@@ -30,12 +35,12 @@ export function findSqlClusterConnection(

	let sqlClusterConnection: SqlClusterConnection = undefined;
	if (sqlConnProfile) {
-		sqlClusterConnection = findSqlClusterConnectionBySqlConnProfile(sqlConnProfile, appContext);
+		sqlClusterConnection = await findSqlClusterConnectionBySqlConnProfile(sqlConnProfile, appContext);
	}
	return sqlClusterConnection;
}

-function findSqlClusterConnectionBySqlConnProfile(sqlConnProfile: azdata.IConnectionProfile, appContext: AppContext): SqlClusterConnection {
+async function findSqlClusterConnectionBySqlConnProfile(sqlConnProfile: azdata.IConnectionProfile, appContext: AppContext): Promise<SqlClusterConnection> {
	if (!sqlConnProfile || !appContext) { return undefined; }

	let sqlOeNodeProvider = appContext.getService<MssqlObjectExplorerNodeProvider>(constants.ObjectExplorerService);

@@ -44,10 +49,10 @@ function findSqlClusterConnectionBySqlConnProfile(sqlConnProfile: azdata.IConnec
	let sqlClusterSession = sqlOeNodeProvider.findSqlClusterSessionBySqlConnProfile(sqlConnProfile);
	if (!sqlClusterSession) { return undefined; }

-	return sqlClusterSession.sqlClusterConnection;
+	return sqlClusterSession.getSqlClusterConnection();
}

-export async function getSqlClusterConnection(
+export async function getSqlClusterConnectionParams(
	obj: azdata.IConnectionProfile | azdata.connection.Connection | ICommandObjectExplorerContext): Promise<ConnectionParam> {

	if (!obj) { return undefined; }

@@ -75,12 +80,9 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
	let serverInfo = await azdata.connection.getServerInfo(connectionId);
	if (!serverInfo || !serverInfo.options) { return undefined; }

-	let endpoints: IEndpoint[] = getClusterEndpoints(serverInfo);
+	let endpoints: bdc.IEndpointModel[] = getClusterEndpoints(serverInfo);
	if (!endpoints || endpoints.length === 0) { return undefined; }

-	let index = endpoints.findIndex(ep => ep.serviceName.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
-	if (index < 0) { return undefined; }
-
	let credentials = await azdata.connection.getCredentials(connectionId);
	if (!credentials) { return undefined; }

@@ -90,29 +92,91 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
		options: {}
	};

-	let hostAndIp = getHostAndPortFromEndpoint(endpoints[index].endpoint);
-	clusterConnInfo.options[constants.hostPropName] = hostAndIp.host;
-	// TODO should we default the port? Or just ignore later?
-	clusterConnInfo.options[constants.knoxPortPropName] = hostAndIp.port || constants.defaultKnoxPort;
+	let clusterController: bdc.IClusterController | undefined = undefined;
	let authType = clusterConnInfo.options[constants.authenticationTypePropName] = sqlConnInfo.options[constants.authenticationTypePropName];
+	const controllerEndpoint = endpoints.find(ep => ep.name.toLowerCase() === 'controller');
	if (authType && authType.toLowerCase() !== constants.integratedAuth) {
		clusterConnInfo.options[constants.userPropName] = sqlConnInfo.options[constants.userPropName]; //should be the same user as sql master
		clusterConnInfo.options[constants.passwordPropName] = credentials.password;
		try {
-			const bdcApi = <bdc.IExtension>await vscode.extensions.getExtension(bdc.constants.extensionName).activate();
-			const controllerEndpoint = endpoints.find(ep => ep.serviceName.toLowerCase() === 'controller');
-			const controller = bdcApi.getClusterController(controllerEndpoint.endpoint, 'basic', sqlConnInfo.options[constants.userPropName], credentials.password);
-			clusterConnInfo.options[constants.userPropName] = await controller.getKnoxUsername(sqlConnInfo.options[constants.userPropName]);
+			clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo);
+
+			clusterConnInfo.options[constants.userPropName] = await clusterController.getKnoxUsername(clusterConnInfo.options[constants.userPropName]);
		} catch (err) {
			console.log(`Unexpected error getting Knox username for SQL Cluster connection: ${err}`);
			throw err;
		}
+	} else {
+		clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo);
	}

+	let hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
+	if (hadoopEndpointIndex < 0) {
+		endpoints = (await clusterController.getEndPoints()).endPoints;
+		hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
+	}
+	const hostAndIp = getHostAndPortFromEndpoint(endpoints[hadoopEndpointIndex].endpoint);
+	clusterConnInfo.options[constants.hostPropName] = hostAndIp.host;
+	// TODO should we default the port? Or just ignore later?
+	clusterConnInfo.options[constants.knoxPortPropName] = hostAndIp.port || constants.defaultKnoxPort;
	clusterConnInfo = connToConnectionParam(clusterConnInfo);

	return clusterConnInfo;
}

+async function getClusterController(controllerEndpoint: string, connInfo: ConnectionParam): Promise<bdc.IClusterController | undefined> {
+	const bdcApi = <bdc.IExtension>await vscode.extensions.getExtension(bdc.constants.extensionName).activate();
+	let authType: bdc.AuthType = connInfo.options[constants.authenticationTypePropName] === AuthType.Integrated ? 'integrated' : 'basic';
+	const controller = bdcApi.getClusterController(
+		controllerEndpoint,
+		authType,
+		connInfo.options[constants.userPropName],
+		connInfo.options[constants.passwordPropName]);
+	try {
+		await controller.getClusterConfig();
+		return controller;
+	} catch (err) {
+		// Initial username/password failed so prompt user for username password until either user
+		// cancels out or we successfully connect
+		console.log(`Error connecting to cluster controller: ${err}`);
+		let errorMessage = '';
+		while (true) {
+			const prompter = new CodeAdapter();
+			let username = await prompter.promptSingle<string>(<IQuestion>{
+				type: QuestionTypes.input,
+				name: 'inputPrompt',
+				message: localize('promptBDCUsername', "{0}Please provide the username to connect to the BDC Controller:", errorMessage),
+				default: connInfo.options[constants.userPropName]
+			});
+			if (!username) {
+				console.log(`User cancelled out of username prompt for BDC Controller`);
+				break;
+			}
+			const password = await prompter.promptSingle<string>(<IQuestion>{
+				type: QuestionTypes.password,
+				name: 'passwordPrompt',
+				message: localize('promptBDCPassword', "Please provide the password to connect to the BDC Controller"),
+				default: ''
+			});
+			if (!password) {
+				console.log(`User cancelled out of password prompt for BDC Controller`);
+				break;
+			}
+			const controller = bdcApi.getClusterController(controllerEndpoint, authType, username, password);
+			try {
+				await controller.getClusterConfig();
+				// Update our connection with the new info
+				connInfo.options[constants.userPropName] = username;
+				connInfo.options[constants.passwordPropName] = password;
+				return controller;
+			} catch (err) {
+				errorMessage = localize('bdcConnectError', "Error: {0}. ", err.message ?? err);
+			}
+		}
+		throw new Error(localize('usernameAndPasswordRequired', "Username and password are required"));
+	}
+}
+
function connProfileToConnectionParam(connectionProfile: azdata.IConnectionProfile): ConnectionParam {
	let result = Object.assign(connectionProfile, { connectionId: connectionProfile.id });
	return <ConnectionParam>result;
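getClusterController above validates the supplied credentials with getClusterConfig and then loops, re-prompting until the user either connects or cancels. The control flow reduces to this generic shape (a sketch; tryConnect and askCredentials stand in for getClusterConfig and the CodeAdapter prompts):

```typescript
async function connectWithRetry<T>(
	tryConnect: (user: string, pass: string) => Promise<T>,
	askCredentials: (previousError: string) => Promise<{ user: string, pass: string } | undefined>,
	initialUser: string,
	initialPass: string): Promise<T> {
	try {
		return await tryConnect(initialUser, initialPass);
	} catch (err) {
		let message = err instanceof Error ? err.message : String(err);
		while (true) {
			const creds = await askCredentials(message);
			if (!creds) { break; } // user cancelled out of the prompt
			try {
				return await tryConnect(creds.user, creds.pass);
			} catch (retryErr) {
				// Surface the failure in the next prompt and try again
				message = retryErr instanceof Error ? retryErr.message : String(retryErr);
			}
		}
		throw new Error('Username and password are required');
	}
}
```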
@@ -5,6 +5,7 @@

import * as azdata from 'azdata';
import * as vscode from 'vscode';
+import * as bdc from 'bdc';
import * as path from 'path';
import * as crypto from 'crypto';
import * as os from 'os';

@@ -222,15 +223,15 @@ export function getUserHome(): string {
	return process.env.HOME || process.env.USERPROFILE;
}

-export function getClusterEndpoints(serverInfo: azdata.ServerInfo): IEndpoint[] | undefined {
+export function getClusterEndpoints(serverInfo: azdata.ServerInfo): bdc.IEndpointModel[] | undefined {
	let endpoints: RawEndpoint[] = serverInfo.options[constants.clusterEndpointsProperty];
	if (!endpoints || endpoints.length === 0) { return []; }

	return endpoints.map(e => {
		// If endpoint is missing, we're on CTP bits. All endpoints from the CTP serverInfo should be treated as HTTPS
		let endpoint = e.endpoint ? e.endpoint : `https://${e.ipAddress}:${e.port}`;
-		let updatedEndpoint: IEndpoint = {
-			serviceName: e.serviceName,
+		let updatedEndpoint: bdc.IEndpointModel = {
+			name: e.serviceName,
			description: e.description,
			endpoint: endpoint,
			protocol: e.protocol

@@ -266,13 +267,6 @@ interface RawEndpoint {
	port?: number;
}

-export interface IEndpoint {
-	serviceName: string;
-	description: string;
-	endpoint: string;
-	protocol: string;
-}
-
export function isValidNumber(maybeNumber: any) {
	return maybeNumber !== undefined
		&& maybeNumber !== null
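With the local IEndpoint interface deleted above, getClusterEndpoints maps the raw serverInfo entries straight onto bdc.IEndpointModel; the only rename callers need to absorb is serviceName to name. The mapping in isolation (a sketch; the RawEndpoint shape is taken from this file, the helper name is illustrative):

```typescript
import * as bdc from 'bdc';

interface RawEndpoint {
	serviceName: string;
	description?: string;
	endpoint?: string;
	protocol?: string;
	ipAddress?: string;
	port?: number;
}

function toEndpointModel(e: RawEndpoint): bdc.IEndpointModel {
	return {
		name: e.serviceName,
		description: e.description,
		// CTP-era serverInfo had no endpoint field; synthesize an HTTPS URL
		endpoint: e.endpoint ? e.endpoint : `https://${e.ipAddress}:${e.port}`,
		protocol: e.protocol
	};
}
```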
@@ -305,6 +305,8 @@ export class JupyterSession implements nb.ISession {
					connectionProfile.options[USER] = await controller.getKnoxUsername(connectionProfile.userName);
				} catch (err) {
					console.log(`Unexpected error getting Knox username for Spark kernel: ${err}`);
+					// Optimistically use the SQL login name - that's going to normally be the case after CU5
+					connectionProfile.options[USER] = connectionProfile.userName;
				}
			}
		}
@@ -27,6 +27,12 @@ export class TestClusterController implements bdc.IClusterController {
	getKnoxUsername(clusterUsername: string): Promise<string> {
		return Promise.resolve('knoxUsername');
	}
+	getEndPoints(promptConnect?: boolean): Promise<bdc.IEndPointsResponse> {
+		return Promise.resolve({
+			response: undefined,
+			endPoints: []
+		});
+	}
}

describe('Jupyter Session Manager', function (): void {
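TestClusterController gains getEndPoints so the stub keeps satisfying bdc.IClusterController after the interface addition in bdc.d.ts. A sketch of how such a stub is exercised (the test body and plain assertion are assumptions, not from this PR):

```typescript
import * as bdc from 'bdc';

describe('TestClusterController', function (): void {
	it('returns an empty endpoint list', async function (): Promise<void> {
		// The stub is declared in this test file, so it is in scope here
		const controller: bdc.IClusterController = new TestClusterController();
		const result: bdc.IEndPointsResponse = await controller.getEndPoints();
		if (result.endPoints.length !== 0) { throw new Error('expected no endpoints'); }
	});
});
```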