Fixes for HDFS node expansion on BDC connections (#14174)

* Change cluster test connect to use endpoints route

* Add more logging

* More logging

* Only connect to controller if needed

* Add comments
This commit is contained in:
Charles Gagnon
2021-02-05 10:12:15 -08:00
committed by GitHub
parent 1944813c4a
commit 49fa56369c
3 changed files with 37 additions and 9 deletions

View File

@@ -159,17 +159,36 @@ export class ClusterController implements IClusterController {
}
}
/**
 * Verify that this cluster supports Kerberos authentication. It does this by sending a request to the Token API route
 * without any credentials and verifying that it gets a 401 response back with a Negotiate www-authenticate header.
 * @returns true only when the unauthenticated token request is rejected with a 401 whose www-authenticate header includes Negotiate
 */
private async verifyKerberosSupported(): Promise<boolean> {
	let tokenApi = new TokenRouterApi(this._url);
	tokenApi.setDefaultAuthentication(new SslAuth());
	try {
		await tokenApi.apiV1TokenPost();
		// If we get to here, the route for tokens doesn't require auth which is an unexpected error state
		console.warn(`Token API returned success without any auth while verifying Kerberos support for BDC Cluster ${this._url}`);
		return false;
	}
	catch (error) {
		// A 401 challenge is the expected (success) path here - anything else means we can't confirm Kerberos support.
		if (!error.response) {
			console.warn(`No response when verifying Kerberos support for BDC Cluster ${this._url} - ${error}`);
			return false;
		}
		if (error.response.statusCode !== 401) {
			console.warn(`Got unexpected status code ${error.response.statusCode} when verifying Kerberos support for BDC Cluster ${this._url}`);
			return false;
		}
		const auths = error.response.headers['www-authenticate'] as string[] ?? [];
		if (auths.includes('Negotiate')) {
			return true;
		}
		console.warn(`Didn't get expected Negotiate auth type when verifying Kerberos support for BDC Cluster ${this._url}. Supported types : ${auths.join(', ')}`);
		return false;
	}
}
}

View File

@@ -107,6 +107,12 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
options: {}
};
// We need to populate some extra information here in order to be able to browse the HDFS nodes.
// First - if the auth type isn't integrated auth then we need to try and find the username to connect
// to the knox endpoint with.
// Next we need the knox endpoint - if we didn't get that from the SQL instance (because the user didn't have permissions
// to see the full DMV usually) then we need to connect to the controller to fetch the full list of endpoints and get it
// that way.
let clusterController: bdc.IClusterController | undefined = undefined;
let authType = clusterConnInfo.options[constants.authenticationTypePropName] = sqlConnInfo.options[constants.authenticationTypePropName];
const controllerEndpoint = endpoints.find(ep => ep.name.toLowerCase() === 'controller');
@@ -129,12 +135,11 @@ async function createSqlClusterConnInfo(sqlConnInfo: azdata.IConnectionProfile |
console.log(`Unexpected error getting Knox username for SQL Cluster connection: ${err}`);
throw err;
}
} else {
clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo);
}
let hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
if (hadoopEndpointIndex < 0) {
clusterController = await getClusterController(controllerEndpoint.endpoint, clusterConnInfo);
endpoints = (await clusterController.getEndPoints()).endPoints;
hadoopEndpointIndex = endpoints.findIndex(ep => ep.name.toLowerCase() === constants.hadoopEndpointNameGateway.toLowerCase());
}
@@ -156,7 +161,8 @@ async function getClusterController(controllerEndpoint: string, connInfo: Connec
connInfo.options[constants.userPropName],
connInfo.options[constants.passwordPropName]);
try {
await controller.getClusterConfig();
// We just want to test the connection - so using getEndpoints since that is available to all users (not just admin)
await controller.getEndPoints();
return controller;
} catch (err) {
// Initial username/password failed so prompt user for username password until either user
@@ -187,7 +193,8 @@ async function getClusterController(controllerEndpoint: string, connInfo: Connec
}
const controller = bdcApi.getClusterController(controllerEndpoint, authType, username, password);
try {
await controller.getClusterConfig();
// We just want to test the connection - so using getEndpoints since that is available to all users (not just admin)
await controller.getEndPoints();
// Update our connection with the new info
connInfo.options[constants.userPropName] = username;
connInfo.options[constants.passwordPropName] = password;

View File

@@ -395,7 +395,8 @@ async function getClusterController(controllerEndpoint: string, authType: bdc.Au
username,
password);
try {
await controller.getClusterConfig();
// We just want to test the connection - so using getEndpoints since that is available to all users (not just admin)
await controller.getEndPoints();
return controller;
} catch (err) {
// Initial username/password failed so prompt user for username password until either user
@@ -426,7 +427,8 @@ async function getClusterController(controllerEndpoint: string, authType: bdc.Au
}
const controller = bdcApi.getClusterController(controllerEndpoint, authType, newUsername, newPassword);
try {
await controller.getClusterConfig();
// We just want to test the connection - so using getEndpoints since that is available to all users (not just admin)
await controller.getEndPoints();
return controller;
} catch (err) {
errorMessage = localize('bdcConnectError', "Error: {0}. ", err.message ?? err);