Fix HDFS with AD auth for browse, read file scenarios (#6840)

* Fix HDFS with AD auth for browse, read
- HDFS now fully supports expanding nodes for all levels, including using cookie for auth
- HDFS now supports reading files from HDFS
- HDFS write file is broken and will be fixed (either in PR update or separate PR)
- Removed the hack that used gateway-0 in place of the actual DNS name, now that real DNS names are supported. This was needed for testing.

* Fix Jupyter error using new DMV with endpoints
This commit is contained in:
Kevin Cunnane
2019-08-20 18:12:38 -07:00
committed by GitHub
parent 29c5977281
commit 1f00249646
7 changed files with 316 additions and 114 deletions

View File

@@ -94,10 +94,20 @@ export enum Platform {
Others
}
/**
 * Shape of one raw entry in serverInfo.options['clusterEndpoints'] as sent by the server.
 * Newer servers supply a full `endpoint` URL; older (CTP) servers supply only
 * `ipAddress`/`port`, which callers must combine themselves (see getClusterEndpoints).
 */
interface RawEndpoint {
	serviceName: string;
	description?: string;
	// Full endpoint URL; absent on CTP-era servers — TODO confirm against server versions.
	endpoint?: string;
	protocol?: string;
	ipAddress?: string;
	port?: number;
}
/**
 * Normalized cluster endpoint exposed to the rest of the extension.
 * Produced by getClusterEndpoints from the raw serverInfo data.
 */
export interface IEndpoint {
	serviceName: string;
	// NOTE(review): getClusterEndpoints does not populate ipAddress/port on the
	// objects it builds — verify whether these fields are still required here.
	ipAddress: string;
	port: number;
	description: string;
	endpoint: string;
	protocol: string;
}
export function getOSPlatform(): Platform {
@@ -142,3 +152,39 @@ export function isEditorTitleFree(title: string): boolean {
let hasNotebookDoc = azdata.nb.notebookDocuments.findIndex(doc => doc.isUntitled && doc.fileName === title) > -1;
return !hasTextDoc && !hasNotebookDoc;
}
/**
 * Extracts and normalizes the big data cluster endpoints advertised in
 * serverInfo.options['clusterEndpoints'].
 *
 * @param serverInfo Server info from the active connection.
 * @returns One IEndpoint per advertised endpoint; an empty array when the
 *          server advertises none. (The declared `| undefined` is kept for
 *          caller compatibility — this implementation always returns an array.)
 */
export function getClusterEndpoints(serverInfo: azdata.ServerInfo): IEndpoint[] | undefined {
	const rawEndpoints: RawEndpoint[] = serverInfo.options['clusterEndpoints'];
	if (!rawEndpoints || rawEndpoints.length === 0) {
		return [];
	}
	const normalized: IEndpoint[] = [];
	for (const raw of rawEndpoints) {
		// If endpoint is missing, we're on CTP bits. All endpoints from the CTP
		// serverInfo should be treated as HTTPS.
		const url = raw.endpoint ? raw.endpoint : `https://${raw.ipAddress}:${raw.port}`;
		normalized.push({
			serviceName: raw.serviceName,
			description: raw.description,
			endpoint: url,
			protocol: raw.protocol
		});
	}
	return normalized;
}
export type HostAndIp = { host: string, port: string };

/**
 * Splits an endpoint URL's authority into host and port.
 * Accepts either ':' or ',' as the host/port separator; `port` is
 * undefined when the authority carries no trailing numeric port.
 */
export function getHostAndPortFromEndpoint(endpoint: string): HostAndIp {
	const authority = vscode.Uri.parse(endpoint).authority;
	const match = /^(.*)([,:](\d+))/g.exec(authority);
	if (!match) {
		// No separator + digits found: the whole authority is the host.
		return {
			host: authority,
			port: undefined
		};
	}
	return {
		host: match[1],
		port: match[3]
	};
}

View File

@@ -7,7 +7,7 @@ import { nb, ServerInfo, connection, IConnectionProfile } from 'azdata';
import { Session, Kernel } from '@jupyterlab/services';
import * as fs from 'fs-extra';
import * as nls from 'vscode-nls';
import { Uri } from 'vscode';
import * as vscode from 'vscode';
import * as path from 'path';
import * as utils from '../common/utils';
const localize = nls.loadMessageBundle();
@@ -247,16 +247,9 @@ export class JupyterSession implements nb.ISession {
if (!clusterEndpoint) {
return Promise.reject(new Error(localize('connectionNotValid', "Spark kernels require a connection to a SQL Server big data cluster master instance.")));
}
if (this.isIntegratedAuth(connection)) {
// Hack: for now, we need to use gateway-0 for integrated auth
let sqlDnsName: string = connection.options['server'].split(',')[0];
let parts = sqlDnsName.split('.');
parts[0] = 'gateway-0';
connection.options[KNOX_ENDPOINT_SERVER] = parts.join('.');
} else {
connection.options[KNOX_ENDPOINT_SERVER] = clusterEndpoint.ipAddress;
}
connection.options[KNOX_ENDPOINT_PORT] = clusterEndpoint.port;
let hostAndPort = utils.getHostAndPortFromEndpoint(clusterEndpoint.endpoint);
connection.options[KNOX_ENDPOINT_SERVER] = hostAndPort.host;
connection.options[KNOX_ENDPOINT_PORT] = hostAndPort.port;
connection.options[USER] = DEFAULT_CLUSTER_USER_NAME;
}
else {
@@ -265,7 +258,7 @@ export class JupyterSession implements nb.ISession {
this.setHostAndPort(':', connection);
this.setHostAndPort(',', connection);
let server = Uri.parse(utils.getLivyUrl(connection.options[KNOX_ENDPOINT_SERVER], connection.options[KNOX_ENDPOINT_PORT])).toString();
let server = vscode.Uri.parse(utils.getLivyUrl(connection.options[KNOX_ENDPOINT_SERVER], connection.options[KNOX_ENDPOINT_PORT])).toString();
let doNotCallChangeEndpointParams = this.isIntegratedAuth(connection) ?
`%_do_not_call_change_endpoint --server=${server} --auth=Kerberos`
: `%_do_not_call_change_endpoint --username=${connection.options[USER]} --password=${connection.options['password']} --server=${server} --auth=Basic_Access`;
@@ -316,7 +309,7 @@ export class JupyterSession implements nb.ISession {
if (!serverInfo || !serverInfo.options) {
return undefined;
}
let endpoints: utils.IEndpoint[] = serverInfo.options['clusterEndpoints'];
let endpoints: utils.IEndpoint[] = utils.getClusterEndpoints(serverInfo);
if (!endpoints || endpoints.length === 0) {
return undefined;
}