diff --git a/src/main/java/org/jcnc/snow/compiler/parser/statement/LoopStatementParser.java b/src/main/java/org/jcnc/snow/compiler/parser/statement/LoopStatementParser.java index bcc69c1..a48bdb0 100644 --- a/src/main/java/org/jcnc/snow/compiler/parser/statement/LoopStatementParser.java +++ b/src/main/java/org/jcnc/snow/compiler/parser/statement/LoopStatementParser.java @@ -10,9 +10,12 @@ import org.jcnc.snow.compiler.parser.context.ParserContext; import org.jcnc.snow.compiler.parser.context.TokenStream; import org.jcnc.snow.compiler.parser.expression.PrattExpressionParser; import org.jcnc.snow.compiler.parser.factory.StatementParserFactory; +import org.jcnc.snow.compiler.parser.util.FlexibleSectionParser; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * 用于解析 loop 语句块,支持以下结构: @@ -36,80 +39,102 @@ public class LoopStatementParser implements StatementParser { public LoopNode parse(ParserContext ctx) { TokenStream ts = ctx.getTokens(); - // loop: + // 匹配 loop: ts.expect("loop"); ts.expect(":"); ts.expectType(TokenType.NEWLINE); skipNewlines(ts); - // initializer: - ts.expect("initializer"); - ts.expect(":"); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); - StatementNode initializer = - StatementParserFactory.get(ts.peek().getLexeme()).parse(ctx); - skipNewlines(ts); + // 用于保存各区块内容 + final StatementNode[] initializer = new StatementNode[1]; + final ExpressionNode[] condition = new ExpressionNode[1]; + final AssignmentNode[] update = new AssignmentNode[1]; + final List body = new ArrayList<>(); - // condition: - ts.expect("condition"); - ts.expect(":"); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); - ExpressionNode condition = new PrattExpressionParser().parse(ctx); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); + // 构建区块定义 map + Map sections = new HashMap<>(); - // update: - ts.expect("update"); - ts.expect(":"); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); + // initializer 区块 + sections.put("initializer", new FlexibleSectionParser.SectionDefinition( + ts1 -> ts1.peek().getLexeme().equals("initializer"), + (ctx1, ts1) -> { + ts1.expect("initializer"); + ts1.expect(":"); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); + initializer[0] = StatementParserFactory.get(ts1.peek().getLexeme()).parse(ctx1); + skipNewlines(ts1); + } + )); - String varName = ts.expectType(TokenType.IDENTIFIER).getLexeme(); - ts.expect("="); - ExpressionNode updateExpr = new PrattExpressionParser().parse(ctx); - ts.expectType(TokenType.NEWLINE); - AssignmentNode update = new AssignmentNode(varName, updateExpr); - skipNewlines(ts); + // condition 区块 + sections.put("condition", new FlexibleSectionParser.SectionDefinition( + ts1 -> ts1.peek().getLexeme().equals("condition"), + (ctx1, ts1) -> { + ts1.expect("condition"); + ts1.expect(":"); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); + condition[0] = new PrattExpressionParser().parse(ctx1); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); + } + )); - // body: - ts.expect("body"); - ts.expect(":"); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); + // update 区块 + sections.put("update", new FlexibleSectionParser.SectionDefinition( + ts1 -> ts1.peek().getLexeme().equals("update"), + (ctx1, ts1) -> { + ts1.expect("update"); + ts1.expect(":"); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); - List body = new ArrayList<>(); - while (!(ts.peek().getType() == TokenType.KEYWORD && - ts.peek().getLexeme().equals("end") && - 
ts.peek(1).getLexeme().equals("body"))) { + String varName = ts1.expectType(TokenType.IDENTIFIER).getLexeme(); + ts1.expect("="); + ExpressionNode updateExpr = new PrattExpressionParser().parse(ctx1); + ts1.expectType(TokenType.NEWLINE); + update[0] = new AssignmentNode(varName, updateExpr); + skipNewlines(ts1); + } + )); - String keyword = ts.peek().getType() == TokenType.KEYWORD - ? ts.peek().getLexeme() - : ""; + // body 区块 + sections.put("body", new FlexibleSectionParser.SectionDefinition( + ts1 -> ts1.peek().getLexeme().equals("body"), + (ctx1, ts1) -> { + ts1.expect("body"); + ts1.expect(":"); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); - StatementNode stmt = StatementParserFactory.get(keyword).parse(ctx); - body.add(stmt); - skipNewlines(ts); - } + while (!(ts1.peek().getLexeme().equals("end") && + ts1.peek(1).getLexeme().equals("body"))) { + String keyword = ts1.peek().getType() == TokenType.KEYWORD + ? ts1.peek().getLexeme() + : ""; + body.add(StatementParserFactory.get(keyword).parse(ctx1)); + skipNewlines(ts1); + } - // end body - ts.expect("end"); - ts.expect("body"); - ts.expectType(TokenType.NEWLINE); - skipNewlines(ts); + ts1.expect("end"); + ts1.expect("body"); + ts1.expectType(TokenType.NEWLINE); + skipNewlines(ts1); + } + )); - // end loop + // 使用 FlexibleSectionParser 解析 loop 各部分 + FlexibleSectionParser.parse(ctx, ts, sections); + + // 匹配 loop 结束 ts.expect("end"); ts.expect("loop"); ts.expectType(TokenType.NEWLINE); - return new LoopNode(initializer, condition, update, body); + return new LoopNode(initializer[0], condition[0], update[0], body); } - /** - * 跳过多余的换行,用于清理语法结构和容错。 - */ private void skipNewlines(TokenStream ts) { while (ts.peek().getType() == TokenType.NEWLINE) { ts.next(); diff --git a/src/main/java/org/jcnc/snow/compiler/parser/util/FlexibleSectionParser.java b/src/main/java/org/jcnc/snow/compiler/parser/util/FlexibleSectionParser.java index 95390a0..b99614e 100644 --- a/src/main/java/org/jcnc/snow/compiler/parser/util/FlexibleSectionParser.java +++ b/src/main/java/org/jcnc/snow/compiler/parser/util/FlexibleSectionParser.java @@ -81,15 +81,6 @@ public class FlexibleSectionParser { */ public record SectionDefinition(Predicate condition, BiConsumer parser) { - /** - * 构造函数。Java Record 自动生成,但我们保留注释以说明其目的。 - * - * @param condition 判断当前是否应解析该区块的逻辑 - * @param parser 负责实际解析过程的处理器 - */ - public SectionDefinition { - } - /** * 获取条件判断函数。 *
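Note: the refactor above delegates section ordering to FlexibleSectionParser, whose parse method is not shown in this patch. As background for reviewers, the sketch below illustrates the dispatch pattern the new code appears to rely on: each SectionDefinition pairs a lookahead predicate with a parsing action, and a driver keeps probing the registered sections until none matches, so the initializer/condition/update/body blocks no longer have to be consumed in a fixed order. Ctx, Tokens, FlexibleSectionSketch, the demo token list, and the body of parse(...) are simplified stand-ins introduced only for illustration — they are assumptions, not the repository's actual ParserContext, TokenStream, or FlexibleSectionParser implementation.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Predicate;

// Assumed shape of a dispatcher like FlexibleSectionParser.parse(ctx, ts, sections):
// probe every registered section's predicate against the lookahead lexeme and run the
// matching parser, repeating until nothing matches (e.g. once "end loop" is reached).
public class FlexibleSectionSketch {

    // Hypothetical stand-ins for ParserContext / TokenStream, only so the sketch is self-contained.
    record Ctx(String name) { }

    static class Tokens {
        final Deque<String> lexemes = new ArrayDeque<>();
        String peek() { return lexemes.isEmpty() ? "<eof>" : lexemes.peek(); }
        String next() { return lexemes.poll(); }
    }

    // Mirrors the shape of FlexibleSectionParser.SectionDefinition: a lookahead predicate plus a parse action.
    record SectionDefinition(Predicate<Tokens> condition, BiConsumer<Ctx, Tokens> parser) { }

    static void parse(Ctx ctx, Tokens ts, Map<String, SectionDefinition> sections) {
        boolean matched = true;
        while (matched) {
            matched = false;
            for (SectionDefinition def : sections.values()) {
                if (def.condition().test(ts)) {
                    def.parser().accept(ctx, ts); // each section parser consumes its own tokens
                    matched = true;
                    break;                        // rescan, so sections may appear in any order
                }
            }
        }
    }

    public static void main(String[] args) {
        Tokens ts = new Tokens();
        ts.lexemes.addAll(List.of("condition", "initializer", "end"));

        Map<String, SectionDefinition> sections = new HashMap<>();
        sections.put("initializer", new SectionDefinition(
                t -> t.peek().equals("initializer"),
                (c, t) -> System.out.println("parsed section: " + t.next())));
        sections.put("condition", new SectionDefinition(
                t -> t.peek().equals("condition"),
                (c, t) -> System.out.println("parsed section: " + t.next())));

        // The token stream lists "condition" before "initializer"; dispatch handles both,
        // then stops at "end", which no registered section claims.
        parse(new Ctx("demo"), ts, sections);
    }
}

One review consideration that follows from the new holder arrays: they start out null and are only filled inside the matching section parser, so if FlexibleSectionParser tolerates a missing or misspelled section, LoopNode may now receive null for that slot — something the old fixed-order code could never produce — and a defensive check may be warranted.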