Support mixed-structure loop

Luke 2025-04-25 16:19:20 +08:00
parent f6804d05a9
commit 28d5459b4c
2 changed files with 79 additions and 63 deletions

LoopStatementParser.java

@@ -10,9 +10,12 @@ import org.jcnc.snow.compiler.parser.context.ParserContext;
 import org.jcnc.snow.compiler.parser.context.TokenStream;
 import org.jcnc.snow.compiler.parser.expression.PrattExpressionParser;
 import org.jcnc.snow.compiler.parser.factory.StatementParserFactory;
+import org.jcnc.snow.compiler.parser.util.FlexibleSectionParser;

 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;

 /**
  * Parses a loop statement block, supporting the following structure:
@@ -36,80 +39,102 @@ public class LoopStatementParser implements StatementParser {
     public LoopNode parse(ParserContext ctx) {
         TokenStream ts = ctx.getTokens();

-        // loop:
+        // match "loop:"
         ts.expect("loop");
         ts.expect(":");
         ts.expectType(TokenType.NEWLINE);
         skipNewlines(ts);

-        // initializer:
-        ts.expect("initializer");
-        ts.expect(":");
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-        StatementNode initializer =
-                StatementParserFactory.get(ts.peek().getLexeme()).parse(ctx);
-        skipNewlines(ts);
-
-        // condition:
-        ts.expect("condition");
-        ts.expect(":");
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-        ExpressionNode condition = new PrattExpressionParser().parse(ctx);
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-
-        // update:
-        ts.expect("update");
-        ts.expect(":");
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-
-        String varName = ts.expectType(TokenType.IDENTIFIER).getLexeme();
-        ts.expect("=");
-        ExpressionNode updateExpr = new PrattExpressionParser().parse(ctx);
-        ts.expectType(TokenType.NEWLINE);
-        AssignmentNode update = new AssignmentNode(varName, updateExpr);
-        skipNewlines(ts);
-
-        // body:
-        ts.expect("body");
-        ts.expect(":");
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-
-        List<StatementNode> body = new ArrayList<>();
-        while (!(ts.peek().getType() == TokenType.KEYWORD &&
-                 ts.peek().getLexeme().equals("end") &&
-                 ts.peek(1).getLexeme().equals("body"))) {
-
-            String keyword = ts.peek().getType() == TokenType.KEYWORD
-                    ? ts.peek().getLexeme()
-                    : "";
-
-            StatementNode stmt = StatementParserFactory.get(keyword).parse(ctx);
-            body.add(stmt);
-            skipNewlines(ts);
-        }
-
-        // end body
-        ts.expect("end");
-        ts.expect("body");
-        ts.expectType(TokenType.NEWLINE);
-        skipNewlines(ts);
-
-        // end loop
+        // holders for the result of each section
+        final StatementNode[] initializer = new StatementNode[1];
+        final ExpressionNode[] condition = new ExpressionNode[1];
+        final AssignmentNode[] update = new AssignmentNode[1];
+        final List<StatementNode> body = new ArrayList<>();
+
+        // build the section-definition map
+        Map<String, FlexibleSectionParser.SectionDefinition> sections = new HashMap<>();
+
+        // initializer section
+        sections.put("initializer", new FlexibleSectionParser.SectionDefinition(
+                ts1 -> ts1.peek().getLexeme().equals("initializer"),
+                (ctx1, ts1) -> {
+                    ts1.expect("initializer");
+                    ts1.expect(":");
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+                    initializer[0] = StatementParserFactory.get(ts1.peek().getLexeme()).parse(ctx1);
+                    skipNewlines(ts1);
+                }
+        ));
+
+        // condition section
+        sections.put("condition", new FlexibleSectionParser.SectionDefinition(
+                ts1 -> ts1.peek().getLexeme().equals("condition"),
+                (ctx1, ts1) -> {
+                    ts1.expect("condition");
+                    ts1.expect(":");
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+                    condition[0] = new PrattExpressionParser().parse(ctx1);
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+                }
+        ));
+
+        // update section
+        sections.put("update", new FlexibleSectionParser.SectionDefinition(
+                ts1 -> ts1.peek().getLexeme().equals("update"),
+                (ctx1, ts1) -> {
+                    ts1.expect("update");
+                    ts1.expect(":");
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+                    String varName = ts1.expectType(TokenType.IDENTIFIER).getLexeme();
+                    ts1.expect("=");
+                    ExpressionNode updateExpr = new PrattExpressionParser().parse(ctx1);
+                    ts1.expectType(TokenType.NEWLINE);
+                    update[0] = new AssignmentNode(varName, updateExpr);
+                    skipNewlines(ts1);
+                }
+        ));
+
+        // body section
+        sections.put("body", new FlexibleSectionParser.SectionDefinition(
+                ts1 -> ts1.peek().getLexeme().equals("body"),
+                (ctx1, ts1) -> {
+                    ts1.expect("body");
+                    ts1.expect(":");
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+
+                    while (!(ts1.peek().getLexeme().equals("end") &&
+                             ts1.peek(1).getLexeme().equals("body"))) {
+                        String keyword = ts1.peek().getType() == TokenType.KEYWORD
+                                ? ts1.peek().getLexeme()
+                                : "";
+                        body.add(StatementParserFactory.get(keyword).parse(ctx1));
+                        skipNewlines(ts1);
+                    }
+
+                    ts1.expect("end");
+                    ts1.expect("body");
+                    ts1.expectType(TokenType.NEWLINE);
+                    skipNewlines(ts1);
+                }
+        ));
+
+        // let FlexibleSectionParser parse the loop sections
+        FlexibleSectionParser.parse(ctx, ts, sections);
+
+        // match "end loop"
         ts.expect("end");
         ts.expect("loop");
         ts.expectType(TokenType.NEWLINE);

-        return new LoopNode(initializer, condition, update, body);
+        return new LoopNode(initializer[0], condition[0], update[0], body);
     }

     /**
      * Skips redundant newlines, used to clean up the grammar structure and tolerate errors.
      */
     private void skipNewlines(TokenStream ts) {
         while (ts.peek().getType() == TokenType.NEWLINE) {
             ts.next();
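Note on why this enables a mixed-structure loop: each SectionDefinition pairs a predicate ("does the next token start this section?") with a handler, so the initializer / condition / update / body blocks no longer have to appear in one fixed order. The body of FlexibleSectionParser.parse is not part of this diff, so the following is only a hedged sketch of how such a dispatcher could consume the map; only parse(ctx, ts, sections) and the SectionDefinition(condition, parser) record are taken from the code above.

    // Illustrative sketch only; the real FlexibleSectionParser.parse may differ.
    import java.util.Map;

    import org.jcnc.snow.compiler.parser.context.ParserContext;
    import org.jcnc.snow.compiler.parser.context.TokenStream;
    import org.jcnc.snow.compiler.parser.util.FlexibleSectionParser.SectionDefinition;

    final class SectionDispatchSketch {

        static void parse(ParserContext ctx, TokenStream ts,
                          Map<String, SectionDefinition> sections) {
            boolean matched = true;
            // Keep scanning while any registered section predicate matches the
            // current token, so the sections may appear in any order.
            while (matched) {
                matched = false;
                for (SectionDefinition def : sections.values()) {
                    if (def.condition().test(ts)) {    // predicate from the record
                        def.parser().accept(ctx, ts);  // handler from the record
                        matched = true;
                        break;
                    }
                }
            }
        }
    }

Under this sketch the dispatcher returns as soon as no predicate matches, which is what lets the trailing ts.expect("end"); ts.expect("loop"); in LoopStatementParser consume the closing keywords.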

FlexibleSectionParser.java

@@ -81,15 +81,6 @@ public class FlexibleSectionParser {
      */
     public record SectionDefinition(Predicate<TokenStream> condition, BiConsumer<ParserContext, TokenStream> parser) {

-        /**
-         * Constructor (generated automatically for Java records; the comment was kept to explain its purpose).
-         *
-         * @param condition logic that decides whether this section should be parsed
-         * @param parser    handler responsible for the actual parsing
-         */
-        public SectionDefinition {
-        }
-
         /**
          * Gets the condition predicate.
          *
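A short note on this second change: deleting the empty compact constructor does not alter SectionDefinition's API, because a record always gets its canonical constructor generated. The fragment below is a hedged usage sketch (the "condition" lexeme check is illustrative only) showing that call sites keep constructing definitions exactly as before.

    // Usage sketch: the canonical constructor is still generated for the record,
    // so this compiles unchanged after the empty compact constructor was removed.
    import org.jcnc.snow.compiler.parser.context.ParserContext;
    import org.jcnc.snow.compiler.parser.context.TokenStream;
    import org.jcnc.snow.compiler.parser.util.FlexibleSectionParser.SectionDefinition;

    final class SectionDefinitionUsageSketch {

        static SectionDefinition conditionSection() {
            return new SectionDefinition(
                    ts -> ts.peek().getLexeme().equals("condition"),   // when to parse
                    (ParserContext ctx, TokenStream ts) -> {
                        // the actual section parsing would go here
                    }
            );
        }
    }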