Merge from vscode a5cf1da01d5db3d2557132be8d30f89c38019f6c (#8525)

* Merge from vscode a5cf1da01d5db3d2557132be8d30f89c38019f6c

* remove files we don't want

* fix hygiene

* update distro

* update distro

* fix hygiene

* fix strict nulls

* distro

* distro

* fix tests

* fix tests

* add another edit

* fix viewlet icon

* fix azure dialog

* fix some padding

* fix more padding issues
This commit is contained in:
Anthony Dresser
2019-12-04 19:28:22 -08:00
committed by GitHub
parent a8818ab0df
commit f5ce7fb2a5
1507 changed files with 42813 additions and 27370 deletions

View File

@@ -577,23 +577,34 @@ export class PieceTreeBase {
let m: RegExpExecArray | null;
// Reset regex to search from the beginning
searcher.reset(start);
let ret: BufferCursor = { line: 0, column: 0 };
let searchText: string;
let offsetInBuffer: (offset: number) => number;
if (searcher._wordSeparators) {
searchText = buffer.buffer.substring(start, end);
offsetInBuffer = (offset: number) => offset + start;
searcher.reset(-1);
} else {
searchText = buffer.buffer;
offsetInBuffer = (offset: number) => offset;
searcher.reset(start);
}
do {
m = searcher.next(buffer.buffer);
m = searcher.next(searchText);
if (m) {
if (m.index >= end) {
if (offsetInBuffer(m.index) >= end) {
return resultLen;
}
this.positionInBuffer(node, m.index - startOffsetInBuffer, ret);
this.positionInBuffer(node, offsetInBuffer(m.index) - startOffsetInBuffer, ret);
let lineFeedCnt = this.getLineFeedCnt(node.piece.bufferIndex, startCursor, ret);
let retStartColumn = ret.line === startCursor.line ? ret.column - startCursor.column + startColumn : ret.column + 1;
let retEndColumn = retStartColumn + m[0].length;
result[resultLen++] = createFindMatch(new Range(startLineNumber + lineFeedCnt, retStartColumn, startLineNumber + lineFeedCnt, retEndColumn), m, captureMatches);
if (m.index + m[0].length >= end) {
if (offsetInBuffer(m.index) + m[0].length >= end) {
return resultLen;
}
if (resultLen >= limitResultCount) {

View File

@@ -57,7 +57,7 @@ export class PieceTreeTextBufferFactory implements ITextBufferFactory {
}
public getFirstLineText(lengthLimit: number): string {
return this._chunks[0].buffer.substr(0, 100).split(/\r\n|\r|\n/)[0];
return this._chunks[0].buffer.substr(0, lengthLimit).split(/\r\n|\r|\n/)[0];
}
}

View File

@@ -32,7 +32,7 @@ import { BracketsUtils, RichEditBracket, RichEditBrackets } from 'vs/editor/comm
import { ITheme, ThemeColor } from 'vs/platform/theme/common/themeService';
import { withUndefinedAsNull } from 'vs/base/common/types';
import { VSBufferReadableStream, VSBuffer } from 'vs/base/common/buffer';
import { TokensStore, MultilineTokens, countEOL } from 'vs/editor/common/model/tokensStore';
import { TokensStore, MultilineTokens, countEOL, MultilineTokens2, TokensStore2 } from 'vs/editor/common/model/tokensStore';
import { Color } from 'vs/base/common/color';
function createTextBufferBuilder() {
@@ -276,6 +276,7 @@ export class TextModel extends Disposable implements model.ITextModel {
private _languageIdentifier: LanguageIdentifier;
private readonly _languageRegistryListener: IDisposable;
private readonly _tokens: TokensStore;
private readonly _tokens2: TokensStore2;
private readonly _tokenization: TextModelTokenization;
//#endregion
@@ -339,6 +340,7 @@ export class TextModel extends Disposable implements model.ITextModel {
this._trimAutoWhitespaceLines = null;
this._tokens = new TokensStore();
this._tokens2 = new TokensStore2();
this._tokenization = new TextModelTokenization(this);
}
@@ -414,6 +416,7 @@ export class TextModel extends Disposable implements model.ITextModel {
// Flush all tokens
this._tokens.flush();
this._tokens2.flush();
// Destroy all my decorations
this._decorations = Object.create(null);
@@ -1262,8 +1265,9 @@ export class TextModel extends Disposable implements model.ITextModel {
let lineCount = oldLineCount;
for (let i = 0, len = contentChanges.length; i < len; i++) {
const change = contentChanges[i];
const [eolCount, firstLineLength] = countEOL(change.text);
const [eolCount, firstLineLength, lastLineLength] = countEOL(change.text);
this._tokens.acceptEdit(change.range, eolCount, firstLineLength);
this._tokens2.acceptEdit(change.range, eolCount, firstLineLength, lastLineLength, change.text.length > 0 ? change.text.charCodeAt(0) : CharCode.Null);
this._onDidChangeDecorations.fire();
this._decorationsTree.acceptReplace(change.rangeOffset, change.rangeLength, change.text.length, change.forceMoveMarkers);
@@ -1717,6 +1721,15 @@ export class TextModel extends Disposable implements model.ITextModel {
});
}
public setSemanticTokens(tokens: MultilineTokens2[] | null): void {
this._tokens2.set(tokens);
this._emitModelTokensChangedEvent({
tokenizationSupportChanged: false,
ranges: [{ fromLineNumber: 1, toLineNumber: this.getLineCount() }]
});
}
public tokenizeViewport(startLineNumber: number, endLineNumber: number): void {
startLineNumber = Math.max(1, startLineNumber);
endLineNumber = Math.min(this._buffer.getLineCount(), endLineNumber);
@@ -1734,6 +1747,15 @@ export class TextModel extends Disposable implements model.ITextModel {
});
}
public clearSemanticTokens(): void {
this._tokens2.flush();
this._emitModelTokensChangedEvent({
tokenizationSupportChanged: false,
ranges: [{ fromLineNumber: 1, toLineNumber: this.getLineCount() }]
});
}
private _emitModelTokensChangedEvent(e: IModelTokensChangedEvent): void {
if (!this._isDisposing) {
this._onDidChangeTokens.fire(e);
@@ -1772,7 +1794,8 @@ export class TextModel extends Disposable implements model.ITextModel {
private _getLineTokens(lineNumber: number): LineTokens {
const lineText = this.getLineContent(lineNumber);
return this._tokens.getTokens(this._languageIdentifier.id, lineNumber - 1, lineText);
const syntacticTokens = this._tokens.getTokens(this._languageIdentifier.id, lineNumber - 1, lineText);
return this._tokens2.addSemanticTokens(lineNumber, syntacticTokens);
}
public getLanguageIdentifier(): LanguageIdentifier {

View File

@@ -510,7 +510,7 @@ export function isValidMatch(wordSeparators: WordCharacterClassifier, text: stri
}
export class Searcher {
private readonly _wordSeparators: WordCharacterClassifier | null;
public readonly _wordSeparators: WordCharacterClassifier | null;
private readonly _searchRegex: RegExp;
private _prevMatchStartIndex: number;
private _prevMatchLength: number;

View File

@@ -11,9 +11,10 @@ import { ColorId, FontStyle, LanguageId, MetadataConsts, StandardTokenType, Toke
import { writeUInt32BE, readUInt32BE } from 'vs/base/common/buffer';
import { CharCode } from 'vs/base/common/charCode';
export function countEOL(text: string): [number, number] {
export function countEOL(text: string): [number, number, number] {
let eolCount = 0;
let firstLineLength = 0;
let lastLineStart = 0;
for (let i = 0, len = text.length; i < len; i++) {
const chr = text.charCodeAt(i);
@@ -28,17 +29,19 @@ export function countEOL(text: string): [number, number] {
} else {
// \r... case
}
lastLineStart = i + 1;
} else if (chr === CharCode.LineFeed) {
if (eolCount === 0) {
firstLineLength = i;
}
eolCount++;
lastLineStart = i + 1;
}
}
if (eolCount === 0) {
firstLineLength = text.length;
}
return [eolCount, firstLineLength];
return [eolCount, firstLineLength, text.length - lastLineStart];
}
function getDefaultMetadata(topLevelLanguageId: LanguageId): number {
@@ -109,6 +112,453 @@ export class MultilineTokensBuilder {
}
}
/**
 * A set of tokens addressed by line deltas relative to an owning block's start
 * line, supporting in-place adjustment for text edits.
 * NOTE(review): contract inferred from `SparseEncodedTokens` below — confirm
 * against any other implementors elsewhere in the repository.
 */
export interface IEncodedTokens {
    /** Total number of tokens stored. */
    getTokenCount(): number;
    /** Line of the token at `tokenIndex`, relative to the owner's start line. */
    getDeltaLine(tokenIndex: number): number;
    /** Delta line of the last token, or -1 when there are no tokens. */
    getMaxDeltaLine(): number;
    /** 0-based start column of the token within its line. */
    getStartCharacter(tokenIndex: number): number;
    /** 0-based end column (exclusive) of the token within its line. */
    getEndCharacter(tokenIndex: number): number;
    /** Encoded metadata word of the token. */
    getMetadata(tokenIndex: number): number;
    /** Remove all tokens. */
    clear(): void;
    /** Adjust tokens for a deleted range, expressed in delta lines / 0-based characters. */
    acceptDeleteRange(startDeltaLine: number, startCharacter: number, endDeltaLine: number, endCharacter: number): void;
    /** Adjust tokens for inserted text at (deltaLine, character); see `countEOL` for the length triple. */
    acceptInsertText(deltaLine: number, character: number, eolCount: number, firstLineLength: number, lastLineLength: number, firstCharCode: number): void;
}
/**
 * Tokens stored as a flat Uint32Array, four uint32 values per token, ordered
 * by (deltaLine, startCharacter). Edits are applied in place; deletions
 * compact surviving tokens toward the front of the array without
 * reallocating, which is why `_tokenCount` can be smaller than
 * `_tokens.length / 4`.
 */
export class SparseEncodedTokens implements IEncodedTokens {
    /**
     * The encoding of tokens is:
     *  4*i    deltaLine (from `startLineNumber`)
     *  4*i+1  startCharacter (from the line start)
     *  4*i+2  endCharacter (from the line start)
     *  4*i+3  metadata
     */
    private _tokens: Uint32Array;
    // Logical token count; trailing slots past 4*_tokenCount are stale.
    private _tokenCount: number;

    constructor(tokens: Uint32Array) {
        this._tokens = tokens;
        this._tokenCount = tokens.length / 4;
    }

    /** Delta line of the last token (tokens are line-ordered), or -1 when empty. */
    public getMaxDeltaLine(): number {
        const tokenCount = this.getTokenCount();
        if (tokenCount === 0) {
            return -1;
        }
        return this.getDeltaLine(tokenCount - 1);
    }

    public getTokenCount(): number {
        return this._tokenCount;
    }

    public getDeltaLine(tokenIndex: number): number {
        return this._tokens[4 * tokenIndex];
    }

    public getStartCharacter(tokenIndex: number): number {
        return this._tokens[4 * tokenIndex + 1];
    }

    public getEndCharacter(tokenIndex: number): number {
        return this._tokens[4 * tokenIndex + 2];
    }

    public getMetadata(tokenIndex: number): number {
        return this._tokens[4 * tokenIndex + 3];
    }

    /** Drops all tokens by resetting the count; the backing array is kept. */
    public clear(): void {
        this._tokenCount = 0;
    }

    /**
     * Adjusts every token for a deletion of the range
     * (startDeltaLine, startCharacter) -> (endDeltaLine, endCharacter).
     * Tokens fully inside the deleted range are dropped; survivors are
     * shrunk/moved and compacted in place.
     */
    public acceptDeleteRange(startDeltaLine: number, startCharacter: number, endDeltaLine: number, endCharacter: number): void {
        // This is a bit complex, here are the cases I used to think about this:
        //
        // 1. The token starts before the deletion range
        // 1a. The token is completely before the deletion range
        //               -----------
        //                          xxxxxxxxxxx
        // 1b. The token starts before, the deletion range ends after the token
        //               -----------
        //                      xxxxxxxxxxx
        // 1c. The token starts before, the deletion range ends precisely with the token
        //               ---------------
        //                      xxxxxxxx
        // 1d. The token starts before, the deletion range is inside the token
        //               ---------------
        //                    xxxxx
        //
        // 2. The token starts at the same position with the deletion range
        // 2a. The token starts at the same position, and ends inside the deletion range
        //               -------
        //               xxxxxxxxxxx
        // 2b. The token starts at the same position, and ends at the same position as the deletion range
        //               ----------
        //               xxxxxxxxxx
        // 2c. The token starts at the same position, and ends after the deletion range
        //               -------------
        //               xxxxxxx
        //
        // 3. The token starts inside the deletion range
        // 3a. The token is inside the deletion range
        //                -------
        //             xxxxxxxxxxxxx
        // 3b. The token starts inside the deletion range, and ends at the same position as the deletion range
        //                ----------
        //             xxxxxxxxxxxxx
        // 3c. The token starts inside the deletion range, and ends after the deletion range
        //                ------------
        //             xxxxxxxxxxx
        //
        // 4. The token starts after the deletion range
        //                           -----------
        //          xxxxxxxx
        //
        const tokens = this._tokens;
        const tokenCount = this._tokenCount;
        const deletedLineCount = (endDeltaLine - startDeltaLine);
        let newTokenCount = 0;
        let hasDeletedTokens = false;
        for (let i = 0; i < tokenCount; i++) {
            const srcOffset = 4 * i;
            let tokenDeltaLine = tokens[srcOffset];
            let tokenStartCharacter = tokens[srcOffset + 1];
            let tokenEndCharacter = tokens[srcOffset + 2];
            const tokenMetadata = tokens[srcOffset + 3];

            if (tokenDeltaLine < startDeltaLine || (tokenDeltaLine === startDeltaLine && tokenEndCharacter <= startCharacter)) {
                // 1a. The token is completely before the deletion range
                // => nothing to do
                newTokenCount++;
                continue;
            } else if (tokenDeltaLine === startDeltaLine && tokenStartCharacter < startCharacter) {
                // 1b, 1c, 1d
                // => the token survives, but it needs to shrink
                if (tokenDeltaLine === endDeltaLine && tokenEndCharacter > endCharacter) {
                    // 1d. The token starts before, the deletion range is inside the token
                    // => the token shrinks by the deletion character count
                    tokenEndCharacter -= (endCharacter - startCharacter);
                } else {
                    // 1b. The token starts before, the deletion range ends after the token
                    // 1c. The token starts before, the deletion range ends precisely with the token
                    // => the token shrinks its ending to the deletion start
                    tokenEndCharacter = startCharacter;
                }
            } else if (tokenDeltaLine === startDeltaLine && tokenStartCharacter === startCharacter) {
                // 2a, 2b, 2c
                if (tokenDeltaLine === endDeltaLine && tokenEndCharacter > endCharacter) {
                    // 2c. The token starts at the same position, and ends after the deletion range
                    // => the token shrinks by the deletion character count
                    tokenEndCharacter -= (endCharacter - startCharacter);
                } else {
                    // 2a. The token starts at the same position, and ends inside the deletion range
                    // 2b. The token starts at the same position, and ends at the same position as the deletion range
                    // => the token is deleted
                    hasDeletedTokens = true;
                    continue;
                }
            } else if (tokenDeltaLine < endDeltaLine || (tokenDeltaLine === endDeltaLine && tokenStartCharacter < endCharacter)) {
                // 3a, 3b, 3c
                if (tokenDeltaLine === endDeltaLine && tokenEndCharacter > endCharacter) {
                    // 3c. The token starts inside the deletion range, and ends after the deletion range
                    // => the token moves left and shrinks
                    if (tokenDeltaLine === startDeltaLine) {
                        // the deletion started on the same line as the token
                        // => the token moves left and shrinks
                        tokenStartCharacter = startCharacter;
                        tokenEndCharacter = tokenStartCharacter + (tokenEndCharacter - endCharacter);
                    } else {
                        // the deletion started on a line above the token
                        // => the token moves to the beginning of the line
                        tokenStartCharacter = 0;
                        tokenEndCharacter = tokenStartCharacter + (tokenEndCharacter - endCharacter);
                    }
                } else {
                    // 3a. The token is inside the deletion range
                    // 3b. The token starts inside the deletion range, and ends at the same position as the deletion range
                    // => the token is deleted
                    hasDeletedTokens = true;
                    continue;
                }
            } else if (tokenDeltaLine > endDeltaLine) {
                // 4. (partial) The token starts after the deletion range, on a line below...
                if (deletedLineCount === 0 && !hasDeletedTokens) {
                    // early stop, there is no need to walk all the tokens and do nothing...
                    newTokenCount = tokenCount;
                    break;
                }
                tokenDeltaLine -= deletedLineCount;
            } else if (tokenDeltaLine === endDeltaLine && tokenStartCharacter >= endCharacter) {
                // 4. (continued) The token starts after the deletion range, on the last line where a deletion occurs
                tokenDeltaLine -= deletedLineCount;
                tokenStartCharacter -= endCharacter;
                tokenEndCharacter -= endCharacter;
            } else {
                throw new Error(`Not possible!`);
            }

            // Compact the surviving (possibly adjusted) token to its new slot.
            const destOffset = 4 * newTokenCount;
            tokens[destOffset] = tokenDeltaLine;
            tokens[destOffset + 1] = tokenStartCharacter;
            tokens[destOffset + 2] = tokenEndCharacter;
            tokens[destOffset + 3] = tokenMetadata;
            newTokenCount++;
        }

        this._tokenCount = newTokenCount;
    }

    /**
     * Adjusts every token for text inserted at (deltaLine, character).
     * `eolCount`/`firstLineLength`/`lastLineLength` describe the inserted text
     * (see `countEOL`); `firstCharCode` is the first inserted character, used
     * to decide whether a single typed word character should grow an adjacent
     * token.
     */
    public acceptInsertText(deltaLine: number, character: number, eolCount: number, firstLineLength: number, lastLineLength: number, firstCharCode: number): void {
        // Here are the cases I used to think about this:
        //
        // 1. The token is completely before the insertion point
        //            -----------   |
        // 2. The token ends precisely at the insertion point
        //            -----------|
        // 3. The token contains the insertion point
        //            -----|------
        // 4. The token starts precisely at the insertion point
        //            |-----------
        // 5. The token is completely after the insertion point
        //            |   -----------
        //
        const isInsertingPreciselyOneWordCharacter = (
            eolCount === 0
            && firstLineLength === 1
            && (
                (firstCharCode >= CharCode.Digit0 && firstCharCode <= CharCode.Digit9)
                || (firstCharCode >= CharCode.A && firstCharCode <= CharCode.Z)
                || (firstCharCode >= CharCode.a && firstCharCode <= CharCode.z)
            )
        );
        const tokens = this._tokens;
        const tokenCount = this._tokenCount;
        for (let i = 0; i < tokenCount; i++) {
            const offset = 4 * i;
            let tokenDeltaLine = tokens[offset];
            let tokenStartCharacter = tokens[offset + 1];
            let tokenEndCharacter = tokens[offset + 2];

            if (tokenDeltaLine < deltaLine || (tokenDeltaLine === deltaLine && tokenEndCharacter < character)) {
                // 1. The token is completely before the insertion point
                // => nothing to do
                continue;
            } else if (tokenDeltaLine === deltaLine && tokenEndCharacter === character) {
                // 2. The token ends precisely at the insertion point
                // => expand the end character only if inserting precisely one character that is a word character
                if (isInsertingPreciselyOneWordCharacter) {
                    tokenEndCharacter += 1;
                } else {
                    continue;
                }
            } else if (tokenDeltaLine === deltaLine && tokenStartCharacter < character && character < tokenEndCharacter) {
                // 3. The token contains the insertion point
                if (eolCount === 0) {
                    // => just expand the end character
                    tokenEndCharacter += firstLineLength;
                } else {
                    // => cut off the token
                    tokenEndCharacter = character;
                }
            } else {
                // 4. or 5.
                if (tokenDeltaLine === deltaLine && tokenStartCharacter === character) {
                    // 4. The token starts precisely at the insertion point
                    // => grow the token (by keeping its start constant) only if inserting precisely one character that is a word character
                    // => otherwise behave as in case 5.
                    if (isInsertingPreciselyOneWordCharacter) {
                        continue;
                    }
                }
                // => the token must move and keep its size constant
                tokenDeltaLine += eolCount;
                if (tokenDeltaLine === deltaLine) {
                    // this token is on the line where the insertion is taking place
                    if (eolCount === 0) {
                        tokenStartCharacter += firstLineLength;
                        tokenEndCharacter += firstLineLength;
                    } else {
                        const tokenLength = tokenEndCharacter - tokenStartCharacter;
                        tokenStartCharacter = lastLineLength + (tokenStartCharacter - character);
                        tokenEndCharacter = tokenStartCharacter + tokenLength;
                    }
                }
            }

            tokens[offset] = tokenDeltaLine;
            tokens[offset + 1] = tokenStartCharacter;
            tokens[offset + 2] = tokenEndCharacter;
        }
    }
}
/**
 * A view over the tokens of one line inside an `IEncodedTokens` store.
 * The view covers the inclusive token index range
 * [startTokenIndex, endTokenIndex]; indices passed to the getters are
 * relative to this line's first token.
 */
export class LineTokens2 {

    private readonly _source: IEncodedTokens;
    private readonly _firstIndex: number;
    private readonly _lastIndex: number;

    constructor(actual: IEncodedTokens, startTokenIndex: number, endTokenIndex: number) {
        this._source = actual;
        this._firstIndex = startTokenIndex;
        this._lastIndex = endTokenIndex;
    }

    /** Number of tokens on this line (index range is inclusive). */
    public getCount(): number {
        return this._lastIndex - this._firstIndex + 1;
    }

    /** 0-based start column of the line-relative token. */
    public getStartCharacter(tokenIndex: number): number {
        return this._source.getStartCharacter(this._firstIndex + tokenIndex);
    }

    /** 0-based end column (exclusive) of the line-relative token. */
    public getEndCharacter(tokenIndex: number): number {
        return this._source.getEndCharacter(this._firstIndex + tokenIndex);
    }

    /** Encoded metadata word of the line-relative token. */
    public getMetadata(tokenIndex: number): number {
        return this._source.getMetadata(this._firstIndex + tokenIndex);
    }
}
/**
 * A block of tokens starting at `startLineNumber` (1-based), whose individual
 * token positions are stored as line deltas inside `tokens`. Text edits are
 * split into a delete and an insert and forwarded to the token store, keeping
 * `startLineNumber`/`endLineNumber` in sync.
 */
export class MultilineTokens2 {

    public startLineNumber: number;
    public endLineNumber: number;
    public tokens: IEncodedTokens;

    constructor(startLineNumber: number, tokens: IEncodedTokens) {
        this.startLineNumber = startLineNumber;
        this.tokens = tokens;
        this.endLineNumber = this.startLineNumber + this.tokens.getMaxDeltaLine();
    }

    // Recompute the (inclusive) end line from the store's last delta line.
    private _updateEndLineNumber(): void {
        this.endLineNumber = this.startLineNumber + this.tokens.getMaxDeltaLine();
    }

    /**
     * Returns the tokens on `lineNumber` (1-based, absolute), or null when the
     * line is outside this block or has no tokens.
     */
    public getLineTokens(lineNumber: number): LineTokens2 | null {
        if (this.startLineNumber <= lineNumber && lineNumber <= this.endLineNumber) {
            const findResult = MultilineTokens2._findTokensWithLine(this.tokens, lineNumber - this.startLineNumber);
            if (findResult) {
                const [startTokenIndex, endTokenIndex] = findResult;
                return new LineTokens2(this.tokens, startTokenIndex, endTokenIndex);
            }
        }
        return null;
    }

    /**
     * Binary search for the inclusive token index range [min, max] whose
     * deltaLine equals `deltaLine`, or null when no token is on that line.
     */
    private static _findTokensWithLine(tokens: IEncodedTokens, deltaLine: number): [number, number] | null {
        let low = 0;
        let high = tokens.getTokenCount() - 1;

        while (low < high) {
            const mid = low + Math.floor((high - low) / 2);
            const midDeltaLine = tokens.getDeltaLine(mid);

            if (midDeltaLine < deltaLine) {
                low = mid + 1;
            } else if (midDeltaLine > deltaLine) {
                high = mid - 1;
            } else {
                // Found the line; widen to the full run of tokens on it.
                let min = mid;
                while (min > low && tokens.getDeltaLine(min - 1) === deltaLine) {
                    min--;
                }
                let max = mid;
                while (max < high && tokens.getDeltaLine(max + 1) === deltaLine) {
                    max++;
                }
                return [min, max];
            }
        }

        // low === high (or the store is empty): check the remaining candidate.
        if (tokens.getDeltaLine(low) === deltaLine) {
            return [low, low];
        }

        return null;
    }

    /** Convenience wrapper: derive the edit-shape triple from `text` and apply. */
    public applyEdit(range: IRange, text: string): void {
        const [eolCount, firstLineLength, lastLineLength] = countEOL(text);
        this.acceptEdit(range, eolCount, firstLineLength, lastLineLength, text.length > 0 ? text.charCodeAt(0) : CharCode.Null);
    }

    /** Apply a replace edit as delete(range) followed by insert at range start. */
    public acceptEdit(range: IRange, eolCount: number, firstLineLength: number, lastLineLength: number, firstCharCode: number): void {
        this._acceptDeleteRange(range);
        this._acceptInsertText(new Position(range.startLineNumber, range.startColumn), eolCount, firstLineLength, lastLineLength, firstCharCode);
        this._updateEndLineNumber();
    }

    private _acceptDeleteRange(range: IRange): void {
        if (range.startLineNumber === range.endLineNumber && range.startColumn === range.endColumn) {
            // Nothing to delete
            return;
        }

        // Line indices relative to this block (0 == startLineNumber).
        const firstLineIndex = range.startLineNumber - this.startLineNumber;
        const lastLineIndex = range.endLineNumber - this.startLineNumber;

        if (lastLineIndex < 0) {
            // this deletion occurs entirely before this block, so we only need to adjust line numbers
            const deletedLinesCount = lastLineIndex - firstLineIndex;
            this.startLineNumber -= deletedLinesCount;
            return;
        }

        const tokenMaxDeltaLine = this.tokens.getMaxDeltaLine();

        if (firstLineIndex >= tokenMaxDeltaLine + 1) {
            // this deletion occurs entirely after this block, so there is nothing to do
            return;
        }

        if (firstLineIndex < 0 && lastLineIndex >= tokenMaxDeltaLine + 1) {
            // this deletion completely encompasses this block
            this.startLineNumber = 0;
            this.tokens.clear();
            return;
        }

        if (firstLineIndex < 0) {
            // Deletion starts above the block: shift the block up, then delete
            // the overlapping prefix (from the block's first line, column 0).
            const deletedBefore = -firstLineIndex;
            this.startLineNumber -= deletedBefore;

            this.tokens.acceptDeleteRange(0, 0, lastLineIndex, range.endColumn - 1);
        } else {
            this.tokens.acceptDeleteRange(firstLineIndex, range.startColumn - 1, lastLineIndex, range.endColumn - 1);
        }
    }

    private _acceptInsertText(position: Position, eolCount: number, firstLineLength: number, lastLineLength: number, firstCharCode: number): void {

        if (eolCount === 0 && firstLineLength === 0) {
            // Nothing to insert
            return;
        }

        const lineIndex = position.lineNumber - this.startLineNumber;

        if (lineIndex < 0) {
            // this insertion occurs before this block, so we only need to adjust line numbers
            this.startLineNumber += eolCount;
            return;
        }

        const tokenMaxDeltaLine = this.tokens.getMaxDeltaLine();

        if (lineIndex >= tokenMaxDeltaLine + 1) {
            // this insertion occurs after this block, so there is nothing to do
            return;
        }

        this.tokens.acceptInsertText(lineIndex, position.column - 1, eolCount, firstLineLength, lastLineLength, firstCharCode);
    }
}
export class MultilineTokens {
public startLineNumber: number;
@@ -193,6 +643,7 @@ export class MultilineTokens {
// this deletion completely encompasses this block
this.startLineNumber = 0;
this.tokens = [];
return;
}
if (firstLineIndex === lastLineIndex) {
@@ -289,6 +740,120 @@ function toUint32Array(arr: Uint32Array | ArrayBuffer): Uint32Array {
}
}
/**
 * Store for semantic (resolver-provided) tokens, kept as an ordered list of
 * `MultilineTokens2` pieces. `addSemanticTokens` overlays these tokens on top
 * of the syntactic tokens of a line.
 */
export class TokensStore2 {

    private _pieces: MultilineTokens2[];

    constructor() {
        this._pieces = [];
    }

    /** Drop all semantic tokens. */
    public flush(): void {
        this._pieces = [];
    }

    /** Replace all pieces; `null` clears the store. */
    public set(pieces: MultilineTokens2[] | null) {
        this._pieces = pieces || [];
    }

    /**
     * Merges the semantic tokens for `lineNumber` into the syntactic tokens
     * `aTokens`, returning a new LineTokens. Semantic tokens (`b`) win over
     * overlapping syntactic tokens (`a`), but keep `a`'s language id and
     * standard token type bits so bracket matching etc. keeps working.
     * Returns `aTokens` unchanged when there are no semantic tokens for the line.
     */
    public addSemanticTokens(lineNumber: number, aTokens: LineTokens): LineTokens {
        const pieces = this._pieces;

        if (pieces.length === 0) {
            return aTokens;
        }

        const pieceIndex = TokensStore2._findFirstPieceWithLine(pieces, lineNumber);
        const bTokens = this._pieces[pieceIndex].getLineTokens(lineNumber);

        if (!bTokens) {
            return aTokens;
        }

        const aLen = aTokens.getCount();
        const bLen = bTokens.getCount();

        let aIndex = 0;
        // Result is built as flat (endOffset, metadata) pairs, LineTokens' wire format.
        let result: number[] = [], resultLen = 0;
        for (let bIndex = 0; bIndex < bLen; bIndex++) {
            const bStartCharacter = bTokens.getStartCharacter(bIndex);
            const bEndCharacter = bTokens.getEndCharacter(bIndex);
            const bMetadata = bTokens.getMetadata(bIndex);

            // push any token from `a` that is before `b`
            while (aIndex < aLen && aTokens.getEndOffset(aIndex) <= bStartCharacter) {
                result[resultLen++] = aTokens.getEndOffset(aIndex);
                result[resultLen++] = aTokens.getMetadata(aIndex);
                aIndex++;
            }

            // push the token from `a` if it intersects the token from `b`
            if (aIndex < aLen && aTokens.getStartOffset(aIndex) < bStartCharacter) {
                result[resultLen++] = bStartCharacter;
                result[resultLen++] = aTokens.getMetadata(aIndex);
            }

            // skip any tokens from `a` that are contained inside `b`
            while (aIndex < aLen && aTokens.getEndOffset(aIndex) <= bEndCharacter) {
                aIndex++;
            }

            // NOTE(review): `aIndex - 1 > 0` only steps back when aIndex > 1, so at
            // aIndex === 1 this reads token 1's metadata rather than token 0's —
            // looks like it was meant to be `aIndex > 0`. Also, when the loop above
            // consumed all of `a` (aIndex === aLen), `getMetadata(aIndex)` reads one
            // past the last token. Confirm intended semantics before changing.
            const aMetadata = aTokens.getMetadata(aIndex - 1 > 0 ? aIndex - 1 : aIndex);
            const languageId = TokenMetadata.getLanguageId(aMetadata);
            const tokenType = TokenMetadata.getTokenType(aMetadata);

            // push the token from `b`: semantic bits from `b`, language id and
            // standard token type carried over from `a`.
            result[resultLen++] = bEndCharacter;
            result[resultLen++] = (
                (bMetadata & MetadataConsts.LANG_TTYPE_CMPL)
                | ((languageId << MetadataConsts.LANGUAGEID_OFFSET) >>> 0)
                | ((tokenType << MetadataConsts.TOKEN_TYPE_OFFSET) >>> 0)
            );
        }

        // push the remaining tokens from `a`
        while (aIndex < aLen) {
            result[resultLen++] = aTokens.getEndOffset(aIndex);
            result[resultLen++] = aTokens.getMetadata(aIndex);
            aIndex++;
        }

        return new LineTokens(new Uint32Array(result), aTokens.getLineContent());
    }

    /**
     * Binary search for the index of the first piece whose line range contains
     * `lineNumber`. When no piece contains the line, returns the insertion
     * candidate `low`; callers must handle that via the null check on
     * `getLineTokens`.
     */
    private static _findFirstPieceWithLine(pieces: MultilineTokens2[], lineNumber: number): number {
        let low = 0;
        let high = pieces.length - 1;

        while (low < high) {
            let mid = low + Math.floor((high - low) / 2);

            if (pieces[mid].endLineNumber < lineNumber) {
                low = mid + 1;
            } else if (pieces[mid].startLineNumber > lineNumber) {
                high = mid - 1;
            } else {
                // Containing piece found; walk back to the first one that contains the line.
                while (mid > low && pieces[mid - 1].startLineNumber <= lineNumber && lineNumber <= pieces[mid - 1].endLineNumber) {
                    mid--;
                }
                return mid;
            }
        }

        return low;
    }

    //#region Editing

    /** Forward a text edit to every piece. */
    public acceptEdit(range: IRange, eolCount: number, firstLineLength: number, lastLineLength: number, firstCharCode: number): void {
        for (const piece of this._pieces) {
            piece.acceptEdit(range, eolCount, firstLineLength, lastLineLength, firstCharCode);
        }
    }

    //#endregion
}
export class TokensStore {
private _lineTokens: (Uint32Array | ArrayBuffer | null)[];
private _len: number;