diff --git a/src/options.js b/src/options.js
index 1456be6360..3439e0ccd9 100755
--- a/src/options.js
+++ b/src/options.js
@@ -1,15 +1,8 @@
-/* @flow */
 
 // A second optional argument can be given to further configure
 // the parser process. These options are recognized:
-export const defaultOptions: {
-  sourceType: string,
-  allowReturnOutsideFunction: boolean,
-  allowImportExportEverywhere: boolean,
-  allowSuperOutsideMethod: boolean,
-  plugins: Array,
-  strictMode: any
-} = {
+
+export const defaultOptions = {
   // Source type ("script" or "module") for different semantics
   sourceType: "script",
   // When enabled, a return at the top level is not considered an
@@ -28,7 +21,7 @@ export const defaultOptions: {
 
 // Interpret and default an options object
 
-export function getOptions(opts?: Object): Object {
+export function getOptions(opts) {
   let options = {};
   for (let key in defaultOptions) {
     options[key] = opts && key in opts ? opts[key] : defaultOptions[key];
diff --git a/src/parser/comments.js b/src/parser/comments.js
index 9fabbed737..f1a266b58a 100644
--- a/src/parser/comments.js
+++ b/src/parser/comments.js
@@ -1,4 +1,3 @@
-/* @flow */
 /* eslint max-len: 0 */
 
 /**
@@ -119,7 +118,7 @@ pp.processComment = function (node) {
       // result in an empty array, and if so, the array must be
       // deleted.
       node.leadingComments = this.state.leadingComments.slice(0, i);
-      if ((node.leadingComments: Array).length === 0) {
+      if (node.leadingComments.length === 0) {
         node.leadingComments = null;
       }
diff --git a/src/parser/index.js b/src/parser/index.js
index bc034e0549..7be5ec3bae 100644
--- a/src/parser/index.js
+++ b/src/parser/index.js
@@ -1,4 +1,3 @@
-/* @flow */
 
 import { reservedWords } from "../util/identifier";
 import { getOptions } from "../options";
@@ -7,7 +6,7 @@ import Tokenizer from "../tokenizer";
 export const plugins = {};
 
 export default class Parser extends Tokenizer {
-  constructor(options: Object, input: string) {
+  constructor(options, input) {
     options = getOptions(options);
     super(options, input);
 
@@ -23,15 +22,15 @@ export default class Parser extends Tokenizer {
     }
   }
 
-  hasPlugin(name: string): boolean {
+  hasPlugin(name) {
     return !!(this.plugins["*"] || this.plugins[name]);
   }
 
-  extend(name: string, f: Function) {
+  extend(name, f) {
     this[name] = f(this[name]);
   }
 
-  loadPlugins(plugins: Array): Object {
+  loadPlugins(plugins) {
     let pluginMap = {};
 
     if (plugins.indexOf("flow") >= 0) {
@@ -52,13 +51,7 @@ export default class Parser extends Tokenizer {
     return pluginMap;
   }
 
-  parse(): {
-    type: "File",
-    program: {
-      type: "Program",
-      body: Array
-    }
-  } {
+  parse() {
     let file = this.startNode();
     let program = this.startNode();
     this.nextToken();
diff --git a/src/parser/util.js b/src/parser/util.js
index 03ca0b2e57..240f6c672e 100644
--- a/src/parser/util.js
+++ b/src/parser/util.js
@@ -1,4 +1,3 @@
-/* @flow */
 
 import { types as tt } from "../tokenizer/types";
 import Parser from "./index";
diff --git a/src/tokenizer/context.js b/src/tokenizer/context.js
index bd35e67860..3e158b915e 100644
--- a/src/tokenizer/context.js
+++ b/src/tokenizer/context.js
@@ -1,4 +1,3 @@
-/* @flow */
 
 // The algorithm used to determine whether a regexp can appear at a
 // given point in the program is loosely based on sweet.js' approach.
@@ -8,27 +7,16 @@ import { types as tt } from "./types";
 import { lineBreak } from "../util/whitespace";
 
 export class TokContext {
-  constructor(
-    token: string,
-    isExpr?: boolean,
-    preserveSpace?: boolean,
-    override?: Function,
-  ) {
+  constructor(token, isExpr, preserveSpace, override) {
     this.token = token;
     this.isExpr = !!isExpr;
     this.preserveSpace = !!preserveSpace;
     this.override = override;
   }
 
-  token: string;
-  isExpr: boolean;
-  preserveSpace: boolean;
-  override: ?Function;
 }
 
-export const types: {
-  [key: string]: TokContext;
-} = {
+export const types = {
   b_stat: new TokContext("{", false),
   b_expr: new TokContext("{", true),
   b_tmpl: new TokContext("${", true),
diff --git a/src/tokenizer/index.js b/src/tokenizer/index.js
index e4a7b4ddc8..8dd238c8a7 100644
--- a/src/tokenizer/index.js
+++ b/src/tokenizer/index.js
@@ -1,8 +1,6 @@
-/* @noflow */
 /* eslint max-len: 0 */
 /* eslint indent: 0 */
 
-import type { TokenType } from "./types";
 import { isIdentifierStart, isIdentifierChar, isKeyword } from "../util/identifier";
 import { types as tt, keywords as keywordTypes } from "./types";
 import { types as ct } from "./context";
@@ -23,11 +21,6 @@ export class Token {
     this.loc = new SourceLocation(state.startLoc, state.endLoc);
   }
 
-  type: TokenType;
-  value: any;
-  start: number;
-  end: number;
-  loc: SourceLocation;
 }
 
 // ## Tokenizer
diff --git a/src/util/whitespace.js b/src/util/whitespace.js
index 50b90c8ca7..4463c8e549 100644
--- a/src/util/whitespace.js
+++ b/src/util/whitespace.js
@@ -1,4 +1,3 @@
-/* @flow */
 
 // Matches a whole line break (where CRLF is considered a single
 // line break). Used to count lines.
@@ -6,7 +5,7 @@
 export const lineBreak = /\r\n?|\n|\u2028|\u2029/;
 export const lineBreakG = new RegExp(lineBreak.source, "g");
 
-export function isNewLine(code: number): boolean {
+export function isNewLine(code) {
   return code === 10 || code === 13 || code === 0x2028 || code === 0x2029;
 }
 
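Reviewer note: the removed Flow annotations are compile-time only, so the runtime
contract of getOptions is unchanged by this patch. A minimal usage sketch of the
merge loop kept in src/options.js; the relative import path here is illustrative,
not part of the package's public entry point:

    import { getOptions } from "./src/options";

    // Keys supplied by the caller win; every other key falls back to the
    // matching entry in defaultOptions (see the for-in loop in the hunk above).
    const options = getOptions({ sourceType: "module" });
    options.sourceType;        // "module" (caller-supplied)
    getOptions().sourceType;   // "script" (default from defaultOptions)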