From b5315d4b27a912c119cc1f5dd1daedea1b325ef5 Mon Sep 17 00:00:00 2001
From: Amjad Masad
Date: Thu, 3 Mar 2016 15:03:55 -0800
Subject: [PATCH] Revert "Remove Flow annotations and pragmas"

This reverts commit 4252244d06b225ab26a02d52c04f9940a3e4d6a2.
---
 src/options.js           | 13 ++++++++++---
 src/parser/comments.js   |  3 ++-
 src/parser/index.js      | 17 ++++++++++++-----
 src/parser/util.js       |  1 +
 src/tokenizer/context.js | 16 ++++++++++++++--
 src/tokenizer/index.js   |  6 ++++++
 src/util/whitespace.js   |  3 ++-
 7 files changed, 47 insertions(+), 12 deletions(-)

diff --git a/src/options.js b/src/options.js
index 3439e0ccd9..1456be6360 100755
--- a/src/options.js
+++ b/src/options.js
@@ -1,8 +1,15 @@
+/* @flow */
 // A second optional argument can be given to further configure
 // the parser process. These options are recognized:
-
-export const defaultOptions = {
+export const defaultOptions: {
+  sourceType: string,
+  allowReturnOutsideFunction: boolean,
+  allowImportExportEverywhere: boolean,
+  allowSuperOutsideMethod: boolean,
+  plugins: Array,
+  strictMode: any
+} = {
   // Source type ("script" or "module") for different semantics
   sourceType: "script",
   // When enabled, a return at the top level is not considered an
   // error.
@@ -21,7 +28,7 @@

 // Interpret and default an options object

-export function getOptions(opts) {
+export function getOptions(opts?: Object): Object {
   let options = {};
   for (let key in defaultOptions) {
     options[key] = opts && key in opts ? opts[key] : defaultOptions[key];
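A note on the options.js hunk above: the restored signature `getOptions(opts?: Object): Object` makes the merge behaviour explicit, since the argument is optional and every key of `defaultOptions` ends up populated. A minimal standalone sketch of the same pattern, using an illustrative two-key default set rather than the patch's full option list:

/* @flow */
// Same merge pattern as src/options.js: copy every default key,
// letting caller-supplied values take precedence.
const defaults = { sourceType: "script", strictMode: null };

function getOptions(opts?: Object): Object {
  let options = {};
  for (let key in defaults) {
    options[key] = opts && key in opts ? opts[key] : defaults[key];
  }
  return options;
}

getOptions({ sourceType: "module" });
// => { sourceType: "module", strictMode: null }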
diff --git a/src/parser/comments.js b/src/parser/comments.js
index f1a266b58a..9fabbed737 100644
--- a/src/parser/comments.js
+++ b/src/parser/comments.js
@@ -1,3 +1,4 @@
+/* @flow */
 /* eslint max-len: 0 */

 /**
@@ -118,7 +119,7 @@ pp.processComment = function (node) {
       // result in an empty array, and if so, the array must be
       // deleted.
       node.leadingComments = this.state.leadingComments.slice(0, i);
-      if (node.leadingComments.length === 0) {
+      if ((node.leadingComments: Array).length === 0) {
         node.leadingComments = null;
       }
diff --git a/src/parser/index.js b/src/parser/index.js
index 7be5ec3bae..bc034e0549 100644
--- a/src/parser/index.js
+++ b/src/parser/index.js
@@ -1,3 +1,4 @@
+/* @flow */
 import { reservedWords } from "../util/identifier";
 import { getOptions } from "../options";
@@ -6,7 +7,7 @@ import Tokenizer from "../tokenizer";
 export const plugins = {};

 export default class Parser extends Tokenizer {
-  constructor(options, input) {
+  constructor(options: Object, input: string) {
     options = getOptions(options);
     super(options, input);
@@ -22,15 +23,15 @@ export default class Parser extends Tokenizer {
     }
   }

-  hasPlugin(name) {
+  hasPlugin(name: string): boolean {
     return !!(this.plugins["*"] || this.plugins[name]);
   }

-  extend(name, f) {
+  extend(name: string, f: Function) {
     this[name] = f(this[name]);
   }

-  loadPlugins(plugins) {
+  loadPlugins(plugins: Array): Object {
     let pluginMap = {};

     if (plugins.indexOf("flow") >= 0) {
@@ -51,7 +52,13 @@ export default class Parser extends Tokenizer {
     return pluginMap;
   }

-  parse() {
+  parse(): {
+    type: "File",
+    program: {
+      type: "Program",
+      body: Array
+    }
+  } {
     let file = this.startNode();
     let program = this.startNode();
     this.nextToken();
diff --git a/src/parser/util.js b/src/parser/util.js
index 240f6c672e..03ca0b2e57 100644
--- a/src/parser/util.js
+++ b/src/parser/util.js
@@ -1,3 +1,4 @@
+/* @flow */
 import { types as tt } from "../tokenizer/types";
 import Parser from "./index";
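For readers unfamiliar with the `(node.leadingComments: Array)` expression restored in src/parser/comments.js: that is a Flow type cast, not runtime code; it asserts a type to the checker and is erased by the Babel Flow transform. A minimal sketch of the syntax, with illustrative values:

/* @flow */
// A Flow cast is written (expression: Type) and has no runtime effect.
const xs = ([1, 2, 3]: Array<number>);

// An incompatible cast is rejected at check time, not at runtime:
// (xs: string); // Flow error: Array<number> is incompatible with string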
diff --git a/src/tokenizer/context.js b/src/tokenizer/context.js
index 3e158b915e..bd35e67860 100644
--- a/src/tokenizer/context.js
+++ b/src/tokenizer/context.js
@@ -1,3 +1,4 @@
+/* @flow */
 // The algorithm used to determine whether a regexp can appear at a
 // given point in the program is loosely based on sweet.js' approach.
@@ -7,16 +8,27 @@ import { types as tt } from "./types";
 import { lineBreak } from "../util/whitespace";

 export class TokContext {
-  constructor(token, isExpr, preserveSpace, override) {
+  constructor(
+    token: string,
+    isExpr?: boolean,
+    preserveSpace?: boolean,
+    override?: Function,
+  ) {
     this.token = token;
     this.isExpr = !!isExpr;
     this.preserveSpace = !!preserveSpace;
     this.override = override;
   }
+  token: string;
+  isExpr: boolean;
+  preserveSpace: boolean;
+  override: ?Function;
 }

-export const types = {
+export const types: {
+  [key: string]: TokContext;
+} = {
   b_stat: new TokContext("{", false),
   b_expr: new TokContext("{", true),
   b_tmpl: new TokContext("${", true),
diff --git a/src/tokenizer/index.js b/src/tokenizer/index.js
index f16d6de1f9..a029fee07b 100644
--- a/src/tokenizer/index.js
+++ b/src/tokenizer/index.js
@@ -1,6 +1,7 @@
 /* eslint max-len: 0 */
 /* eslint indent: 0 */

+import type { TokenType } from "./types";
 import { isIdentifierStart, isIdentifierChar, isKeyword } from "../util/identifier";
 import { types as tt, keywords as keywordTypes } from "./types";
 import { types as ct } from "./context";
@@ -21,6 +22,11 @@ export class Token {
     this.loc = new SourceLocation(state.startLoc, state.endLoc);
   }
+  type: TokenType;
+  value: any;
+  start: number;
+  end: number;
+  loc: SourceLocation;
 }

 // ## Tokenizer
diff --git a/src/util/whitespace.js b/src/util/whitespace.js
index 4463c8e549..50b90c8ca7 100644
--- a/src/util/whitespace.js
+++ b/src/util/whitespace.js
@@ -1,3 +1,4 @@
+/* @flow */
 // Matches a whole line break (where CRLF is considered a single
 // line break). Used to count lines.

@@ -5,7 +6,7 @@
 export const lineBreak = /\r\n?|\n|\u2028|\u2029/;
 export const lineBreakG = new RegExp(lineBreak.source, "g");

-export function isNewLine(code) {
+export function isNewLine(code: number): boolean {
   return code === 10 || code === 13 || code === 0x2028 || code === 0x2029;
 }
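The `TokContext` and `Token` hunks restore Flow class-property declarations: field types live in the class body next to the constructor assignments, are checked at every read and write, and are stripped from the compiled output. A minimal sketch of the pattern (the `Position` class is illustrative, not taken from the patch):

/* @flow */
class Position {
  // Property declarations type-check field access but emit no runtime code.
  line: number;
  column: number;

  constructor(line: number, column: number) {
    this.line = line;
    this.column = column;
  }
}

const loc = new Position(1, 0);
// loc.line = "one"; // Flow error: string is incompatible with number

Note that Flow only checks files carrying the /* @flow */ pragma when `flow check` runs without the `--all` flag, which is why the pragma is re-added to each touched file alongside its annotations.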