NOTE(review): this patch arrived whitespace-mangled onto a single line; the hunk
structure below is reconstructed from the unified-diff syntax, and each hunk's
line counts match its `@@` header exactly. Blank-line placement inside context
runs is inferred — TODO confirm against the target files before applying.
Review notes on the added code (content kept as authored):
  - In `_parse_use`, the `use` keyword node is appended to `self._ast` rather
    than the local `ast`, and `AST(ASTTypesEnum.Keyword, token.value)` omits
    the line-count arguments every other `AST(...)` call in this patch passes —
    presumably unintended; confirm with the author.
  - `Console.write_line('FILE:', ...)`, `('NAME')` and `('TEST')` look like
    leftover debug output.
  - The commented-out `# for i in range(0, tokens.count()):` is dead code, and
    the `<.>` token in the `_parse_use` docstring appears to be a typo.

diff --git a/src/cc_lang_interpreter/application.py b/src/cc_lang_interpreter/application.py
index 5315d0b..ead87d1 100644
--- a/src/cc_lang_interpreter/application.py
+++ b/src/cc_lang_interpreter/application.py
@@ -64,6 +64,8 @@ class Application(ApplicationABC):
 
         self._runtime.file = file
 
+        if self._runtime.file != '':
+            Console.write_line('FILE:', self._runtime.file)
         f = open(file, 'r', encoding='utf-8').readlines()
         for i in range(0, len(f)):
             self._runtime.line_count = i + 1
diff --git a/src/parser/model/ast_types_enum.py b/src/parser/model/ast_types_enum.py
index 016c6a3..fc8e081 100644
--- a/src/parser/model/ast_types_enum.py
+++ b/src/parser/model/ast_types_enum.py
@@ -12,4 +12,4 @@ class ASTTypesEnum(Enum):
     FuncDeclaration = 'func_declaration'
     VariableDeclaration = 'variable_declaration'
     VariableValue = 'variable_value'
-
+    UseDirective = 'use_directive'
diff --git a/src/parser/service/parser_service.py b/src/parser/service/parser_service.py
index 7f7b848..29f668a 100644
--- a/src/parser/service/parser_service.py
+++ b/src/parser/service/parser_service.py
@@ -150,7 +150,7 @@ class ParserService(ParserABC):
                 break
 
             elif i == tokens.count()-1:
-                self._runtime.error(Error(ErrorCodesEnum.Expected, FormatCharacters.Left_Brace.value))
+                self._runtime.error(Error(ErrorCodesEnum.Expected, FormatCharacters.Semicolon.value))
 
             elif i == 0 and token.type == TokenTypes.Keyword and token.value in self._access_keywords:
                 ast.append(AST(ASTTypesEnum.Access, token.value, self._runtime.line_count, self._runtime.line_count))
@@ -200,10 +200,58 @@ class ParserService(ParserABC):
 
         return AST(ASTTypesEnum.VariableDeclaration, ast, self._runtime.line_count, self._runtime.line_count)
 
+    def _parse_use(self, tokens: List[Token]) -> AST:
+        """ Parses use imports
+
+        Args:
+            tokens (List[Token]): Tokens from lexer
+
+        AST:
+            use Program;
+
+            use Program.Test;
+            <.>
+
+        Returns:
+            AST: Library or class AST
+        """
+        ast = List(AST)
+        i = 0
+        # for i in range(0, tokens.count()):
+        while i < tokens.count():
+            token: Token = tokens[i]
+
+            if i == tokens.count()-1 and token.type == TokenTypes.Format_Character and token.value == FormatCharacters.Semicolon.value:
+                break
+
+            elif i == tokens.count()-1:
+                self._runtime.error(Error(ErrorCodesEnum.Expected, FormatCharacters.Semicolon.value))
+
+            elif i == 0 and token.type == TokenTypes.Keyword and token.value == Keywords.Use.value:
+                self._ast.append(AST(ASTTypesEnum.Keyword, token.value))
+
+            elif i == 1 and token.type == TokenTypes.Name:
+                Console.write_line('NAME')
+                name, last_token = self._parse_name(tokens.skip(i))
+                if last_token is not None:
+                    i = tokens.index(last_token)
+                ast.append(name)
+
+            else:
+                Console.write_line('TEST')
+                self._runtime.error(Error(ErrorCodesEnum.Unexpected, token.value))
+
+            i += 1
+
+        return AST(ASTTypesEnum.UseDirective, ast, self._runtime.line_count, self._runtime.line_count)
+
     def create_ast(self, tokens: List[Token]) -> List[AST]:
         self._ast = List(AST)
 
-        if tokens.where(lambda t: t.type == TokenTypes.Keyword and t.value == Keywords.Library.value).count() > 0:
+        if tokens.where(lambda t: t.type == TokenTypes.Keyword and t.value == Keywords.Use.value).count() > 0:
+            self._ast.append(self._parse_use(tokens))
+
+        elif tokens.where(lambda t: t.type == TokenTypes.Keyword and t.value == Keywords.Library.value).count() > 0:
             self._ast.append(self._parse_library_or_class(tokens))
 
         elif tokens.where(lambda t: t.type == TokenTypes.Keyword and t.value == Keywords.Class.value).count() > 0: