Merge pull request #121 from GeWuYou/docs/refactor-guidelines-comprehensive

Docs/refactor guidelines comprehensive
This commit is contained in:
gewuyou 2026-03-21 13:47:39 +08:00 committed by GitHub
commit 0442fec2d1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 1057 additions and 97 deletions

View File

@ -0,0 +1,62 @@
schema_version: 1
generated_at_utc: "2026-03-21T04:47:58Z"
generated_from: ".ai/environment/tools.raw.yaml"
generator: "scripts/generate-ai-environment.py"
platform:
family: "wsl-linux"
os: "Linux"
distro: "Ubuntu 24.04.4 LTS"
shell: "bash"
capabilities:
dotnet: true
python: true
node: true
bun: true
docker: true
fast_search: true
json_cli: true
tool_selection:
search:
preferred: "rg"
fallback: "grep"
use_for: "Repository text search."
json:
preferred: "jq"
fallback: "python3"
use_for: "Inspecting or transforming JSON command output."
shell:
preferred: "bash"
fallback: "sh"
use_for: "Repository shell scripts and command execution."
scripting:
preferred: "python3"
fallback: "bash"
use_for: "Non-trivial local automation and helper scripts."
docs_package_manager:
preferred: "bun"
fallback: "npm"
use_for: "Installing and previewing the docs site."
build_and_test:
preferred: "dotnet"
fallback: "unavailable"
use_for: "Build, test, restore, and solution validation."
python:
available: true
helper_packages:
requests: true
rich: true
openai: false
tiktoken: false
pydantic: false
pytest: false
preferences:
prefer_project_listed_tools: true
prefer_python_for_non_trivial_automation: true
avoid_unlisted_system_tools: true
rules:
- "Use rg instead of grep for repository search when rg is available."
- "Use jq for JSON inspection; fall back to python3 if jq is unavailable."
- "Prefer python3 over complex bash for non-trivial scripting when python3 is available."
- "Use bun for docs preview workflows when bun is available; otherwise fall back to npm."
- "Use dotnet for repository build and test workflows."
- "Do not assume unrelated system tools are part of the supported project environment."

View File

@ -0,0 +1,89 @@
schema_version: 1
generated_at_utc: "2026-03-21T04:47:28Z"
generator: "scripts/collect-dev-environment.sh"
platform:
os: "Linux"
distro: "Ubuntu 24.04.4 LTS"
version: "24.04"
kernel: "5.15.167.4-microsoft-standard-WSL2"
wsl: true
wsl_version: "2.4.13"
shell: "bash"
required_runtimes:
dotnet:
installed: true
version: "10.0.104"
path: "/usr/bin/dotnet"
purpose: "Builds and tests the GFramework solution."
python3:
installed: true
version: "Python 3.12.3"
path: "/usr/bin/python3"
purpose: "Runs local automation and environment collection scripts."
node:
installed: true
version: "v20.20.1"
path: "/usr/bin/node"
purpose: "Provides the JavaScript runtime used by docs tooling."
bun:
installed: true
version: "1.3.10"
path: "/root/.bun/bin/bun"
purpose: "Installs and previews the VitePress documentation site."
required_tools:
git:
installed: true
version: "git version 2.43.0"
path: "/usr/bin/git"
purpose: "Source control and patch review."
bash:
installed: true
version: "GNU bash, version 5.2.21(1)-release (x86_64-pc-linux-gnu)"
path: "/usr/bin/bash"
purpose: "Executes repository scripts and shell automation."
rg:
installed: true
version: "ripgrep 15.1.0 (rev af60c2de9d)"
path: "/root/.bun/install/global/node_modules/@openai/codex-linux-x64/vendor/x86_64-unknown-linux-musl/path/rg"
purpose: "Fast text search across the repository."
jq:
installed: true
version: "jq-1.7"
path: "/usr/bin/jq"
purpose: "Inspecting and transforming JSON outputs."
project_tools:
docker:
installed: true
version: "Docker version 29.2.1, build a5c7197"
path: "/usr/bin/docker"
purpose: "Runs MegaLinter and other containerized validation tools."
python_packages:
requests:
installed: true
version: "2.31.0"
purpose: "Simple HTTP calls in local helper scripts."
rich:
installed: true
version: "13.7.1"
purpose: "Readable CLI output for local Python helpers."
openai:
installed: false
version: "not-installed"
purpose: "Optional scripted access to OpenAI APIs."
tiktoken:
installed: false
version: "not-installed"
purpose: "Optional token counting for prompt and context inspection."
pydantic:
installed: false
version: "not-installed"
purpose: "Optional typed config and schema validation for helper scripts."
pytest:
installed: false
version: "not-installed"
purpose: "Optional lightweight testing for Python helper scripts."

4
.gitignore vendored
View File

@ -12,4 +12,6 @@ opencode.json
.omc/
docs/.omc/
docs/.vitepress/cache/
local-plan/
local-plan/
# tool
.venv/

242
AGENTS.md
View File

@ -1,42 +1,220 @@
# Repository Guidelines
# AGENTS.md
## Project Structure & Module Organization
This document is the single source of truth for coding behavior in this repository.
`GFramework.sln` is the entry point for the full .NET solution. Runtime code lives in `GFramework.Core/`,
`GFramework.Game/`, `GFramework.Godot/`, and `GFramework.Ecs.Arch/`. Interface-only contracts stay in the paired
`*.Abstractions/` projects. Roslyn generators are split across `GFramework.SourceGenerators/`,
`GFramework.Godot.SourceGenerators/`, and `GFramework.SourceGenerators.Common/`. Tests mirror the runtime modules in
`GFramework.Core.Tests/`, `GFramework.Game.Tests/`, `GFramework.Ecs.Arch.Tests/`, and
`GFramework.SourceGenerators.Tests/`. Documentation is under `docs/`, Godot templates under `Godot/script_templates/`,
and repository utilities under `scripts/` and `refactor-scripts/`.
All AI agents and contributors must follow these rules when writing, reviewing, or modifying code in `GFramework`.
## Build, Test, and Development Commands
## Environment Capability Inventory
- `dotnet build GFramework.sln` builds the full solution from the repo root.
- `dotnet test GFramework.sln --no-build` runs all NUnit test projects after a build.
- `dotnet test GFramework.Core.Tests --filter "FullyQualifiedName~CommandExecutorTests.Execute"` runs a focused NUnit
test.
- `bash scripts/validate-csharp-naming.sh` checks PascalCase namespace and directory rules used by CI.
- `cd docs && bun install && bun run dev` starts the VitePress docs site locally.
- Before choosing runtimes or CLI tools, read `@.ai/environment/tools.ai.yaml`.
- Use `@.ai/environment/tools.raw.yaml` only when you need the full collected facts behind the AI-facing hints.
- Prefer the project-relevant tools listed there instead of assuming every installed system tool is fair game.
- If the real environment differs from the inventory, use the project-relevant installed tool and report the mismatch.
## Coding Style & Naming Conventions
## Commenting Rules (MUST)
Use standard C# formatting with 4-space indentation and one public type per file. The repository keeps `ImplicitUsings`
disabled and `Nullable` enabled, so write explicit `using` directives and annotate nullability carefully. Follow
`PascalCase` for types, methods, namespaces, directories, and constants; use `_camelCase` for private fields and
`camelCase` for locals and parameters. Keep namespaces aligned with folders, for example
`GFramework.Core.Architectures`.
All generated or modified code MUST include clear and meaningful comments where required by the rules below.
## Testing Guidelines
### XML Documentation (Required)
Tests use NUnit 4 with `Microsoft.NET.Test.Sdk`; some suites also use Moq. Place tests in the matching module test
project and name files `*Tests.cs`. Prefer directory parity with production code, for example `GFramework.Core/Logging/`
and `GFramework.Core.Tests/Logging/`. Add or update tests for every behavior change, especially public APIs, source
generators, and integration paths.
- All public, protected, and internal types and members MUST include XML documentation comments (`///`).
- Use `<summary>`, `<param>`, `<returns>`, `<exception>`, and `<remarks>` where applicable.
- Comments must explain intent, contract, and usage constraints instead of restating syntax.
- If a member participates in lifecycle, threading, registration, or disposal behavior, document that behavior
explicitly.
## Commit & Pull Request Guidelines
### Inline Comments
Recent history follows Conventional Commits style such as `feat(events): ...`, `refactor(localization): ...`,
`docs(guide): ...`, and `test(localization): ...`. Keep commits scoped and imperative. PRs should explain the
motivation, implementation, and validation commands run; link related issues; and include screenshots when docs, UI, or
Godot-facing behavior changes.
- Add inline comments for:
- Non-trivial logic
- Concurrency or threading behavior
- Performance-sensitive paths
- Workarounds, compatibility constraints, or edge cases
- Registration order, lifecycle sequencing, or generated code assumptions
- Avoid obvious comments such as `// increment i`.
### Architecture-Level Comments
- Core framework components such as Architecture, Module, System, Context, Registry, Service Module, and Lifecycle types
MUST include high-level explanations of:
- Responsibilities
- Lifecycle
- Interaction with other components
- Why the abstraction exists
- When to use it instead of alternatives
### Source Generator Comments
- Generated logic and generator pipelines MUST explain:
- What is generated
- Why it is generated
- The semantic assumptions the generator relies on
- Any diagnostics or fallback behavior
### Complex Logic Requirement
- Methods with non-trivial logic MUST document:
- The core idea
- Key decisions
- Edge case handling, if any
### Quality Rules
- Comments MUST NOT be trivial, redundant, or misleading.
- Prefer explaining `why` and `when`, not just `what`.
- Code should remain understandable without requiring external context.
- Prefer slightly more explanation over too little for framework code.
### Enforcement
- Missing required documentation is a coding standards violation.
- Code that does not meet the documentation rules is considered incomplete.
## Code Style
### Language and Project Settings
- Follow the repository defaults:
- `ImplicitUsings` disabled
- `Nullable` enabled
- `GenerateDocumentationFile` enabled for shipped libraries
- `LangVersion` is generally `preview` in the main libraries and abstractions
- Do not rely on implicit imports. Declare every required `using` explicitly.
- Write null-safe code that respects nullable annotations instead of suppressing warnings by default.
### Naming and Structure
- Use the namespace pattern `GFramework.{Module}.{Feature}` with PascalCase segments.
- Follow standard C# naming:
- Types, methods, properties, events, and constants: PascalCase
- Interfaces: `I` prefix
- Parameters and locals: camelCase
- Private fields: `_camelCase`
- Keep abstractions projects free of implementation details and engine-specific dependencies.
- Preserve existing module boundaries. Do not introduce new cross-module dependencies without clear architectural need.
### Formatting
- Use 4 spaces for indentation. Do not use tabs.
- Use Allman braces.
- Keep `using` directives at the top of the file and sort them consistently.
- Separate logical blocks with blank lines when it improves readability.
- Prefer one primary type per file unless the surrounding project already uses a different local pattern.
- Keep line length readable. Around 120 characters is the preferred upper bound.
### C# Conventions
- Prefer explicit, readable code over clever shorthand in framework internals.
- Match existing async patterns and naming conventions (`Async` suffix for asynchronous methods).
- Avoid hidden side effects in property getters, constructors, and registration helpers.
- Preserve deterministic behavior in registries, lifecycle orchestration, and generated outputs.
- When adding analyzers or suppressions, keep them minimal and justify them in code comments if the reason is not
obvious.
### Analyzer and Validation Expectations
- The repository uses `Meziantou.Analyzer`; treat analyzer feedback as part of the coding standard.
- Naming must remain compatible with `scripts/validate-csharp-naming.sh`.
## Testing Requirements
### Required Coverage
- Every non-trivial feature, bug fix, or behavior change MUST include tests or an explicit justification for why a test
is not practical.
- Public API changes must be covered by unit or integration tests.
- Regression fixes should include a test that fails before the fix and passes after it.
### Test Organization
- Mirror the source structure in test projects whenever practical.
- Reuse existing architecture test infrastructure when relevant:
- `ArchitectureTestsBase<T>`
- `SyncTestArchitecture`
- `AsyncTestArchitecture`
- Keep tests focused on observable behavior, not implementation trivia.
### Source Generator Tests
- Source generator changes MUST be covered by generator tests.
- Preserve snapshot-based verification patterns already used in the repository.
- When generator behavior changes intentionally, update snapshots together with the implementation.
### Validation Commands
Use the smallest command set that proves the change, then expand if the change is cross-cutting.
```bash
# Build the full solution
dotnet build GFramework.sln -c Release
# Run all tests
dotnet test GFramework.sln -c Release
# Run a single test project
dotnet test GFramework.Core.Tests -c Release
dotnet test GFramework.Game.Tests -c Release
dotnet test GFramework.SourceGenerators.Tests -c Release
dotnet test GFramework.Ecs.Arch.Tests -c Release
# Run a single NUnit test or test group
dotnet test GFramework.Core.Tests -c Release --filter "FullyQualifiedName~CommandExecutorTests.Execute"
# Validate naming rules used by CI
bash scripts/validate-csharp-naming.sh
```
### Test Execution Expectations
- Run targeted tests for the code you changed whenever possible.
- Run broader solution-level validation for changes that touch shared abstractions, lifecycle behavior, source
generators, or dependency wiring.
- Do not claim completion if required tests were skipped; state what was not run and why.
## Security Rules
- Validate external or user-controlled input before it reaches file system, serialization, reflection, code generation,
or process boundaries.
- Do not build command strings, file paths, type names, or generated code from untrusted input without strict validation
or allow-listing.
- Avoid logging secrets, tokens, credentials, or machine-specific sensitive data.
- Keep source generators deterministic and free of hidden environment or network dependencies.
- Prefer least-privilege behavior for file, process, and environment access.
- Do not introduce unsafe deserialization, broad reflection-based activation, or dynamic code execution unless it is
explicitly required and tightly constrained.
- When adding caching, pooling, or shared mutable state, document thread-safety assumptions and failure modes.
- Minimize new package dependencies. Add them only when necessary and keep scope narrow.
## Documentation Rules
### Code Documentation
- Any change to public API, lifecycle semantics, module behavior, or extension points MUST update the related XML docs.
- If a framework abstraction changes meaning or intended usage, update the explanatory comments in code as part of the
same change.
### Repository Documentation
- Update the relevant `README.md` or `docs/` page when behavior, setup steps, architecture guidance, or user-facing
examples change.
- The main documentation site lives under `docs/`, with Chinese content under `docs/zh-CN/`.
- Keep code samples, package names, and command examples aligned with the current repository state.
- Prefer documenting behavior and design intent, not only API surface.
### Documentation Preview
When documentation changes need local preview, use:
```bash
cd docs && bun install && bun run dev
```
## Review Standard
Before considering work complete, confirm:
- Required comments and XML docs are present
- Code follows repository style and naming rules
- Relevant tests were added or updated
- Sensitive or unsafe behavior was not introduced
- User-facing documentation is updated when needed

153
CLAUDE.md
View File

@ -1,36 +1,22 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
This file provides project understanding for AI agents working in this repository.
## Project Overview
GFramework 是面向游戏开发的模块化 C# 框架,核心能力与引擎解耦。灵感参考 QFramework在模块边界和可扩展性方面持续重构。
GFramework 是面向游戏开发的模块化 C# 框架,核心能力与引擎解耦。项目灵感参考 QFramework在模块边界、工程组织和可扩展性方面持续重构。
## Build & Test Commands
## AI Agent Instructions
```bash
# 构建整个解决方案
dotnet build GFramework.sln -c Release
All coding rules are defined in:
# 运行全部测试
dotnet test GFramework.sln -c Release
@AGENTS.md
# 运行单个测试项目
dotnet test GFramework.Core.Tests -c Release
dotnet test GFramework.Game.Tests -c Release
dotnet test GFramework.SourceGenerators.Tests -c Release
dotnet test GFramework.Ecs.Arch.Tests -c Release
# 运行单个测试方法NUnit filter
dotnet test GFramework.Core.Tests -c Release --filter "FullyQualifiedName~CommandExecutorTests.Execute"
# 命名规范验证CI 中使用)
bash scripts/validate-csharp-naming.sh
```
Follow them strictly.
## Module Dependency Graph
```
```text
GFramework (meta package) ─→ Core + Game
GFramework.Core ─→ Core.Abstractions
GFramework.Game ─→ Game.Abstractions, Core, Core.Abstractions
@ -39,71 +25,110 @@ GFramework.Ecs.Arch ─→ Ecs.Arch.Abstractions, Core, Core.Abstractions
GFramework.SourceGenerators ─→ SourceGenerators.Common, SourceGenerators.Abstractions
```
- **Abstractions projects** (netstandard2.1): 只含接口定义,零实现依赖
- **Core/Game** (net8.0;net9.0;net10.0): 平台无关实现
- **Godot**: Godot 引擎集成层
- **SourceGenerators** (netstandard2.1): Roslyn 增量生成器
- **Abstractions projects** (`netstandard2.1`): 只包含接口和契约定义,不承载运行时实现逻辑。
- **Core / Game / Ecs.Arch** (`net8.0;net9.0;net10.0`): 平台无关的核心实现层。
- **Godot**: Godot 引擎集成层,负责与节点、场景和引擎生命周期对接。
- **SourceGenerators** (`netstandard2.1`): Roslyn 增量源码生成器及其公共基础设施。
## Architecture Pattern
框架核心采用 Architecture / Model / System / Utility 四层结构:
框架核心采用 `Architecture / Model / System / Utility` 四层结构:
- **IArchitecture**: 顶层容器,管理生命周期Init → Ready → Destroy、注册 Model/System/Utility
- **IContextAware**: 统一上下文访问接口,所有组件通过 `SetContext(IArchitectureContext)` 获得对 Architecture 服务的引用
- **IModel**: 数据层(状态管理),继承 IContextAware
- **ISystem**: 业务逻辑层,继承 IContextAware
- **IUtility**: 无状态工具层
- **IArchitecture**: 顶层容器,负责生命周期管理、组件注册、模块安装和统一服务访问。
- **IContextAware**: 统一上下文访问接口,组件通过 `SetContext(IArchitectureContext)` 获取架构上下文。
- **IModel**: 数据与状态层,负责长期状态和业务数据建模。
- **ISystem**: 业务逻辑层,负责命令执行、流程编排和规则落地。
- **IUtility**: 通用无状态工具层,供其他层复用。
关键实现类:`GFramework.Core/Architectures/Architecture.cs`(主流程编排)
关键实现位于 `GFramework.Core/Architectures/Architecture.cs`,其职责是作为总协调器串联生命周期、组件注册和模块系统。
## Architecture Details
### Lifecycle
Architecture 负责统一生命周期编排,核心阶段包括:
- `Init`
- `Ready`
- `Destroy`
在实现层中,生命周期被拆分为更细粒度的初始化与销毁阶段,用于保证 Utility、Model、System、服务模块和钩子的顺序一致性。
### Component Coordination
框架通过独立组件协作完成架构编排:
- `ArchitectureLifecycle`: 管理生命周期阶段、阶段转换和生命周期钩子。
- `ArchitectureComponentRegistry`: 管理 Model、System、Utility 的注册与解析。
- `ArchitectureModules`: 管理模块安装、服务模块接入和扩展点注册。
这组拆分的目标是降低单个核心类的职责密度,同时保持对外 API 稳定。
### Context Propagation
`IArchitectureContext` 和相关 Provider 类型负责在组件之间传播上下文能力,使 Model、System
和外部扩展都能通过统一入口访问架构服务,而不直接耦合具体实现细节。
## Key Patterns
**CQRS**: Command/Query 分离支持同步与异步。Mediator 模式通过 `Mediator.SourceGenerator` 实现。
### CQRS
**EventBus**: 类型安全事件总线,支持优先级、过滤器、弱引用订阅。`IEventBus.Send<T>()` / `Register<T>(handler)`
`IUnRegister`
命令与查询分离支持同步与异步执行。Mediator 模式通过源码生成器集成,以减少模板代码并保持调用路径清晰。
**BindableProperty**: 响应式属性绑定,`IBindableProperty<T>.Value` 变更自动触发 `OnValueChanged`
### EventBus
**Coroutine**: 帧驱动协程系统,`IYieldInstruction` + `CoroutineScheduler`,提供 WaitForSeconds/WaitForEvent/WaitForTask
等指令。
类型安全事件总线支持事件发布、订阅、优先级、过滤器和弱引用订阅。它是模块之间松耦合通信的核心基础设施之一。
**IoC**: 通过 `MicrosoftDiContainer` 封装 `Microsoft.Extensions.DependencyInjection`
### BindableProperty
**Service Modules**: `IServiceModule` 模式用于向 Architecture 注册内置服务EventBus、CommandExecutor、QueryExecutor 等)。
响应式属性模型通过值变化通知驱动界面或业务层更新,适合表达轻量级状态同步
## Code Conventions
### Coroutine
- **命名空间**: `GFramework.{Module}.{Feature}` (PascalCase)CI 通过 `scripts/validate-csharp-naming.sh` 强制校验
- **ImplicitUsings: disabled** — 所有 using 必须显式声明
- **Nullable: enabled**
- **LangVersion: preview**
- **GenerateDocumentationFile: true** — 公共 API 需要 XML 文档注释
- **Analyzers**: Meziantou.Analyzer 在构建时强制代码规范
帧驱动协程系统基于 `IYieldInstruction` 和调度器抽象,支持等待时间、事件和任务完成等常见模式。
## Testing
### IoC
- **Framework**: NUnit 4.x + Moq
- **测试结构**: 镜像源码目录(如 `Core.Tests/Command/` 对应 `Core/Command/`
- **基类**: `ArchitectureTestsBase<T>` 提供 Architecture 初始化/销毁模板;`SyncTestArchitecture` /
`AsyncTestArchitecture` 用于集成测试
- **Target frameworks**: net8.0;net10.0
依赖注入通过 `MicrosoftDiContainer``Microsoft.Extensions.DependencyInjection` 进行封装,用于统一组件注册和服务解析体验。
### Service Modules
`IServiceModule` 模式用于向 Architecture 注册内置服务,例如 EventBus、CommandExecutor、QueryExecutor 等。这一模式承担“基础设施能力装配”的职责。
## Source Generators
四个生成器,均为 Roslyn 增量源码生成器:
当前仓库包含多类 Roslyn 增量源码生成器:
- `LoggerGenerator` (`[Log]`): 自动生成 ILogger 字段和日志方法
- `PriorityGenerator` (`[Priority]`): 生成优先级比较实现
- `EnumExtensionsGenerator` (`[GenerateEnumExtensions]`): 枚举扩展方法
- `ContextAwareGenerator` (`[ContextAware]`): 自动实现 IContextAware 接口
- `LoggerGenerator` (`[Log]`): 自动生成日志字段和日志辅助方法。
- `PriorityGenerator` (`[Priority]`): 生成优先级比较相关实现
- `EnumExtensionsGenerator` (`[GenerateEnumExtensions]`): 生成枚举扩展能力。
- `ContextAwareGenerator` (`[ContextAware]`): 自动实现 `IContextAware` 相关样板逻辑。
测试使用快照验证Verify + snapshot files
这些生成器的目标是减少重复代码,同时保持框架层 API 的一致性与可维护性
## Documentation
## Module Structure
VitePress 站点位于 `docs/`,内容为中文 (`docs/zh-CN/`)。修改文档后本地预览
仓库以“抽象层 + 实现层 + 集成层 + 生成器层”的方式组织
```bash
cd docs && bun install && bun run dev
```
- `GFramework.Core.Abstractions` / `GFramework.Game.Abstractions`: 约束接口和公共契约。
- `GFramework.Core` / `GFramework.Game`: 提供平台无关实现。
- `GFramework.Godot`: 提供与 Godot 运行时集成的适配实现。
- `GFramework.Ecs.Arch`: 提供 ECS Architecture 相关扩展。
- `GFramework.SourceGenerators` 及相关 Abstractions/Common: 提供代码生成能力。
这种结构的核心设计目标是让抽象稳定、实现可替换、引擎集成隔离、生成器能力可独立演进。
## Documentation Structure
项目文档位于 `docs/`,中文内容位于 `docs/zh-CN/`。文档内容覆盖:
- 入门与安装
- Core / Game / Godot / ECS 各模块能力
- Source Generator 使用说明
- 教程、最佳实践与故障排查
阅读顺序通常建议先看根目录 `README.md` 和各子模块 `README.md`,再进入 `docs/` 查阅专题说明。
## Design Intent
GFramework 的设计重点不是把所有能力堆进单一核心类,而是通过清晰的模块边界、可组合的服务注册方式、稳定的抽象契约以及适度自动化的源码生成,构建一个适合长期演进的游戏开发基础框架。

View File

@ -118,6 +118,10 @@ GFramework 是一个开源的游戏开发框架,我们欢迎所有形式的贡
## 开发环境设置
当前推荐的项目相关环境、CLI 与 AI 可用工具清单请查看:
- [开发环境能力清单](./contributor/development-environment.md)
### 前置要求
- **.NET SDK**8.0、9.0 或 10.0

View File

@ -0,0 +1,96 @@
# 开发环境能力清单
这份文档只记录对 `GFramework` 当前开发和 AI 协作真正有用的环境能力,不收录与本项目无关的系统工具。
如果某个工具没有出现在这里默认表示它对当前仓库不是必需项AI 也不应因为“系统里刚好装了”就优先使用它。
## 当前环境基线
当前仓库验证基线是:
- **运行环境**WSL2
- **发行版**Ubuntu 24.04 LTS
- **Shell**`bash`
机器可读的环境数据分成两层:
- `GFramework/.ai/environment/tools.raw.yaml`:完整事实采集
- `GFramework/.ai/environment/tools.ai.yaml`:给 AI 看的精简决策提示
AI 应优先读取 `tools.ai.yaml`,只有在需要追溯完整事实时才查看 `tools.raw.yaml`
## 当前项目需要的运行时
| 工具 | 是否需要 | 在 GFramework 中的用途 |
|-----------|------|---------------------------------|
| `dotnet` | 必需 | 构建、测试、打包整个解决方案 |
| `python3` | 推荐 | 运行本地辅助脚本、环境采集和轻量自动化 |
| `node` | 推荐 | 作为文档工具链的 JavaScript 运行时 |
| `bun` | 推荐 | 安装并预览 `docs/` 下的 VitePress 文档站点 |
## 当前项目需要的命令行工具
| 工具 | 是否需要 | 在 GFramework 中的用途 |
|----------|------|-----------------------------------------------|
| `git` | 必需 | 提交代码、查看 diff、审查变更 |
| `bash` | 必需 | 执行仓库脚本,例如 `scripts/validate-csharp-naming.sh` |
| `rg` | 必需 | 在仓库中快速搜索代码和文档 |
| `jq` | 推荐 | 处理 JSON 输出,便于本地脚本和 AI 做结构化检查 |
| `docker` | 可选 | 运行 MegaLinter 等容器化检查工具 |
这里只保留和当前仓库直接相关的 CLI。像 `kubectl``terraform``helm``java`、数据库客户端等工具,即使系统已安装,也不进入正式清单。
## Python 包
Python 包只记录两类内容:
- 当前环境里已经存在、对开发辅助有价值的包
- 明确对 AI/脚本化开发有帮助、后续可能会安装的包
| 包 | 当前状态 | 用途 |
|------------|---------|---------------------|
| `requests` | 当前环境已安装 | 用于简单 HTTP 调用和脚本集成 |
| `rich` | 当前环境已安装 | 用于更易读的终端输出 |
| `openai` | 当前环境可选 | 用于脚本化调用 OpenAI API |
| `tiktoken` | 当前环境可选 | 用于 token 估算和上下文检查 |
| `pydantic` | 当前环境可选 | 用于结构化配置和模式校验 |
| `pytest` | 当前环境可选 | 用于 Python 辅助脚本的小型测试 |
如果某个 Python 包与当前仓库没有直接关系,就不要加入清单。
## AI 使用约定
AI 在这个仓库里应优先使用:
- `rg` 做文本搜索
- `jq` 做 JSON 检查
- `bash` 执行仓库脚本
- `dotnet` 做构建和测试
- `bun` 做文档预览
- `python3 + requests` 做轻量本地辅助脚本
AI 不应直接把原始探测数据当成决策规则;应以 `tools.ai.yaml` 中的推荐和 fallback 为准。如果确实需要引入新工具,应先更新环境清单,再在任务中使用。
## 如何刷新环境清单
使用仓库脚本先采集原始环境,再生成 AI 版本:
```bash
# 输出原始环境清单到终端
bash scripts/collect-dev-environment.sh --check
# 写回原始清单
bash scripts/collect-dev-environment.sh --write
# 由原始清单生成 AI 决策清单
python3 scripts/generate-ai-environment.py
```
## 维护规则
- 目标不是记录“这台机器装了什么”而是记录“GFramework 开发和 AI 协作实际该用什么”。
- 新工具只有在满足以下条件之一时才应加入清单:
- 当前仓库构建、测试、文档或验证直接依赖它
- AI 在当前仓库中会高频使用,且能明显提升效率
- 新贡献者配置当前仓库开发环境时确实需要知道它
- 不满足上述条件的工具,不写入文档,也不写入 `.ai/environment/tools.raw.yaml` / `.ai/environment/tools.ai.yaml`

View File

@ -0,0 +1,268 @@
#!/usr/bin/env bash
# Collects the project-relevant development environment inventory as YAML.
set -euo pipefail
# Repository root, resolved relative to this script's own location.
ROOT_DIR="$(CDPATH='' cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)"
# Destination of the machine-readable raw inventory (used by --write).
OUTPUT_PATH="${ROOT_DIR}/.ai/environment/tools.raw.yaml"
# Requested mode; defaults to --check (print to stdout) when no argument given.
MODE="${1:---check}"
# Print the supported invocation modes; the quoted 'EOF' keeps the text literal.
usage() {
cat <<'EOF'
Usage:
bash scripts/collect-dev-environment.sh --check
bash scripts/collect-dev-environment.sh --write
Modes:
--check Print the raw project-relevant environment inventory.
--write Write the raw inventory to .ai/environment/tools.raw.yaml.
EOF
}
# Abort with usage help unless MODE is one of the two supported flags.
ensure_supported_mode() {
    if [[ "${MODE}" != "--check" && "${MODE}" != "--write" ]]; then
        usage
        exit 1
    fi
}
# Resolve the absolute path of a tool, or print an empty string when absent.
# Fix: the original resolved the tool twice (once to test, once to print);
# a single lookup avoids the duplicate probe. Callers consume this via
# command substitution, which strips any trailing newline either way.
command_path() {
    local tool
    local resolved
    tool="$1"
    # `|| true` keeps set -e from aborting when the tool is not on PATH.
    resolved="$(command -v "${tool}" 2>/dev/null || true)"
    printf '%s' "${resolved}"
}
# Emit the literal string "true" or "false" (for direct YAML interpolation)
# depending on whether the tool is available on PATH.
command_installed() {
    local tool="$1"
    local state="false"
    if command -v "${tool}" >/dev/null 2>&1; then
        state="true"
    fi
    printf '%s' "${state}"
}
# Print a single-line version string for a tool, "not-installed" when the
# tool is absent, or "unknown" when the tool exists but --version fails.
# Fix: the original duplicated the same "--version" probe across ten
# near-identical case branches; every supported tool accepts --version
# (each branch already called it), so one normalized invocation covers all.
# `head -n 1` trims multi-line output (e.g. bash, rg) to the meaningful
# first line, matching what the original default branch already did.
command_version() {
    local tool="$1"
    if ! command -v "${tool}" >/dev/null 2>&1; then
        printf '%s' "not-installed"
        return
    fi
    "${tool}" --version 2>/dev/null | head -n 1 || printf '%s' "unknown"
}
# Print the installed version of a Python package, or "not-installed".
# Uses importlib.metadata so the package itself is never imported
# (no side effects, works for packages that fail on import).
python_package_version() {
    local package_name="$1"
    python3 - "${package_name}" <<'PY'
from importlib import metadata
import sys
package_name = sys.argv[1]
try:
    print(metadata.version(package_name))
except metadata.PackageNotFoundError:
    print("not-installed")
PY
}
# Map a package's version probe onto the literal strings "true"/"false"
# for direct YAML interpolation.
python_package_installed() {
    local package_name="$1"
    if [[ "$(python_package_version "${package_name}")" == "not-installed" ]]; then
        printf 'false'
    else
        printf 'true'
    fi
}
# Look up one key (e.g. PRETTY_NAME, VERSION_ID) from /etc/os-release,
# printing "unknown" when the key is missing. Delegates to python3 to get
# reliable quoting/"=" handling instead of fragile shell parsing.
read_os_release() {
    local key="$1"
    python3 - "$key" <<'PY'
import pathlib
import sys
target_key = sys.argv[1]
values = {}
for line in pathlib.Path("/etc/os-release").read_text(encoding="utf-8").splitlines():
    if "=" not in line:
        continue
    key, value = line.split("=", 1)
    values[key] = value.strip().strip('"')
print(values.get(target_key, "unknown"))
PY
}
# Emit the full raw environment inventory as YAML on stdout.
# Platform facts are gathered into locals first; tool probes run inline
# via $(...) inside the unquoted heredoc so the YAML shape stays readable.
collect_inventory() {
    local os_name distro version_id kernel shell_name wsl_enabled wsl_version timestamp
    os_name="$(uname -s)"
    distro="$(read_os_release PRETTY_NAME)"
    version_id="$(read_os_release VERSION_ID)"
    kernel="$(uname -r)"
    # SHELL may be unset in minimal environments; fall back to bash.
    shell_name="$(basename "${SHELL:-bash}")"
    timestamp="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
    # WSL kernels advertise "microsoft" in /proc/version.
    if grep -qi microsoft /proc/version 2>/dev/null; then
        wsl_enabled="true"
    else
        wsl_enabled="false"
    fi
    # wslinfo exists only on recent WSL; report "unknown" elsewhere.
    if command -v wslinfo >/dev/null 2>&1; then
        wsl_version="$(wslinfo --wsl-version 2>/dev/null || printf '%s' "unknown")"
    else
        wsl_version="unknown"
    fi
    cat <<EOF
schema_version: 1
generated_at_utc: "${timestamp}"
generator: "scripts/collect-dev-environment.sh"
platform:
  os: "${os_name}"
  distro: "${distro}"
  version: "${version_id}"
  kernel: "${kernel}"
  wsl: ${wsl_enabled}
  wsl_version: "${wsl_version}"
  shell: "${shell_name}"
required_runtimes:
  dotnet:
    installed: $(command_installed dotnet)
    version: "$(command_version dotnet)"
    path: "$(command_path dotnet)"
    purpose: "Builds and tests the GFramework solution."
  python3:
    installed: $(command_installed python3)
    version: "$(command_version python3)"
    path: "$(command_path python3)"
    purpose: "Runs local automation and environment collection scripts."
  node:
    installed: $(command_installed node)
    version: "$(command_version node)"
    path: "$(command_path node)"
    purpose: "Provides the JavaScript runtime used by docs tooling."
  bun:
    installed: $(command_installed bun)
    version: "$(command_version bun)"
    path: "$(command_path bun)"
    purpose: "Installs and previews the VitePress documentation site."
required_tools:
  git:
    installed: $(command_installed git)
    version: "$(command_version git)"
    path: "$(command_path git)"
    purpose: "Source control and patch review."
  bash:
    installed: $(command_installed bash)
    version: "$(command_version bash)"
    path: "$(command_path bash)"
    purpose: "Executes repository scripts and shell automation."
  rg:
    installed: $(command_installed rg)
    version: "$(command_version rg)"
    path: "$(command_path rg)"
    purpose: "Fast text search across the repository."
  jq:
    installed: $(command_installed jq)
    version: "$(command_version jq)"
    path: "$(command_path jq)"
    purpose: "Inspecting and transforming JSON outputs."
project_tools:
  docker:
    installed: $(command_installed docker)
    version: "$(command_version docker)"
    path: "$(command_path docker)"
    purpose: "Runs MegaLinter and other containerized validation tools."
python_packages:
  requests:
    installed: $(python_package_installed requests)
    version: "$(python_package_version requests)"
    purpose: "Simple HTTP calls in local helper scripts."
  rich:
    installed: $(python_package_installed rich)
    version: "$(python_package_version rich)"
    purpose: "Readable CLI output for local Python helpers."
  openai:
    installed: $(python_package_installed openai)
    version: "$(python_package_version openai)"
    purpose: "Optional scripted access to OpenAI APIs."
  tiktoken:
    installed: $(python_package_installed tiktoken)
    version: "$(python_package_version tiktoken)"
    purpose: "Optional token counting for prompt and context inspection."
  pydantic:
    installed: $(python_package_installed pydantic)
    version: "$(python_package_version pydantic)"
    purpose: "Optional typed config and schema validation for helper scripts."
  pytest:
    installed: $(python_package_installed pytest)
    version: "$(python_package_version pytest)"
    purpose: "Optional lightweight testing for Python helper scripts."
EOF
}
# Entry point: validate the requested mode, then either write the inventory
# to its canonical location or stream it to stdout for inspection.
ensure_supported_mode
case "${MODE}" in
    --write)
        mkdir -p "$(dirname "${OUTPUT_PATH}")"
        collect_inventory > "${OUTPUT_PATH}"
        printf 'Wrote %s\n' "${OUTPUT_PATH}"
        ;;
    *)
        # Mode was already validated, so this branch is exactly --check.
        collect_inventory
        ;;
esac

View File

@ -0,0 +1,236 @@
#!/usr/bin/env python3
"""Generate the AI-facing tool inventory from the raw environment facts."""
from __future__ import annotations
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
# Repository root (this script lives under scripts/).
ROOT_DIR = Path(__file__).resolve().parent.parent
# Input: full collected environment facts.
RAW_PATH = ROOT_DIR / ".ai" / "environment" / "tools.raw.yaml"
# Output: condensed, decision-oriented inventory for AI agents.
AI_PATH = ROOT_DIR / ".ai" / "environment" / "tools.ai.yaml"
def parse_scalar(value: str) -> Any:
    """Map a raw YAML scalar token to its Python value.

    Recognizes bare booleans and double-quoted strings; anything else is
    returned unchanged as a plain string (numbers are NOT converted).
    """
    booleans = {"true": True, "false": False}
    if value in booleans:
        return booleans[value]
    if value[:1] == '"' and value[-1:] == '"':
        return value[1:-1]
    return value
def parse_simple_yaml(path: Path) -> dict[str, Any]:
    """Parse the restricted, indentation-nested YAML subset used by the
    environment inventory files into plain nested dictionaries.

    Supported syntax: "key: value" scalar lines and bare "key:" mapping
    headers, nested purely by leading-space indentation. Blank lines and
    "#" comment lines are skipped. Scalars follow the same rules as
    ``parse_scalar`` (bare booleans, double-quoted strings, raw strings).
    """
    document: dict[str, Any] = {}
    # Each entry is (indent level, mapping currently being filled).
    scopes: list[tuple[int, dict[str, Any]]] = [(-1, document)]
    for line in path.read_text(encoding="utf-8").splitlines():
        stripped = line.strip()
        if not stripped or stripped.startswith("#"):
            continue
        indent = len(line) - len(line.lstrip(" "))
        # Close every scope this line's indentation has stepped out of.
        while len(scopes) > 1 and indent <= scopes[-1][0]:
            scopes.pop()
        container = scopes[-1][1]
        key, _, raw_value = stripped.partition(":")
        token = raw_value.strip()
        if not token:
            # A bare "key:" opens a nested mapping at this indent level.
            nested: dict[str, Any] = {}
            container[key] = nested
            scopes.append((indent, nested))
        elif token == "true":
            container[key] = True
        elif token == "false":
            container[key] = False
        elif token[:1] == '"' and token[-1:] == '"':
            container[key] = token[1:-1]
        else:
            container[key] = token
    return document
def bool_value(data: dict[str, Any], *keys: str) -> bool:
    """Follow *keys* through nested dicts and coerce the leaf to bool.

    Raises KeyError if an intermediate or final key is missing.
    """
    node: Any = data
    for step in keys:
        node = node[step]
    return bool(node)
def string_value(data: dict[str, Any], *keys: str) -> str:
    """Follow *keys* through nested dicts and stringify the leaf.

    Raises KeyError if an intermediate or final key is missing.
    """
    node: Any = data
    for step in keys:
        node = node[step]
    return str(node)
def choose(preferred: str | None, fallback: str | None) -> str:
    """Pick the preferred tool name, then the fallback, else "unavailable"."""
    return preferred or fallback or "unavailable"
def available_tool(raw: dict[str, Any], section: str, name: str) -> bool:
    """Report whether tool *name* under *section* has installed == true in *raw*."""
    return bool_value(raw, section, name, "installed")
def select_tool(
    use_for: str,
    preferred: str | None,
    fallback: str | None,
) -> dict[str, str]:
    """Describe one tool-selection slot for the AI inventory.

    Missing preferred tools fall back to *fallback*; a missing fallback is
    reported as the literal string "unavailable".
    """
    safe_fallback = fallback or "unavailable"
    return {
        "preferred": preferred or safe_fallback,
        "fallback": safe_fallback,
        "use_for": use_for,
    }
def build_ai_inventory(raw: dict[str, Any]) -> dict[str, Any]:
    """Derive the AI-facing tool inventory from the parsed raw YAML data.

    Parameters:
        raw: Nested dict produced by parse_simple_yaml from tools.raw.yaml.
            Must contain the "required_runtimes", "required_tools",
            "project_tools", "python_packages", and "platform" sections
            read below.

    Returns:
        A dict mirroring the tools.ai.yaml schema (schema_version 1).

    Raises:
        KeyError: If an expected section or key is missing from *raw*.
    """
    # Availability flags for runtimes and CLI tools recorded in the raw file.
    has_python = available_tool(raw, "required_runtimes", "python3")
    has_node = available_tool(raw, "required_runtimes", "node")
    has_bun = available_tool(raw, "required_runtimes", "bun")
    has_dotnet = available_tool(raw, "required_runtimes", "dotnet")
    has_rg = available_tool(raw, "required_tools", "rg")
    has_jq = available_tool(raw, "required_tools", "jq")
    has_bash = available_tool(raw, "required_tools", "bash")
    has_docker = available_tool(raw, "project_tools", "docker")
    # One select_tool slot per workflow; each degrades to its fallback (or
    # "unavailable") when the preferred tool is not installed.
    search = select_tool(
        use_for="Repository text search.",
        preferred="rg" if has_rg else None,
        fallback="grep",
    )
    # NOTE(review): local name "json" shadows the stdlib module name; json is
    # not imported in this file, so this is harmless but worth renaming.
    json = select_tool(
        use_for="Inspecting or transforming JSON command output.",
        preferred="jq" if has_jq else None,
        fallback="python3" if has_python else None,
    )
    scripting = select_tool(
        use_for="Non-trivial local automation and helper scripts.",
        preferred="python3" if has_python else None,
        fallback="bash" if has_bash else None,
    )
    shell = select_tool(
        use_for="Repository shell scripts and command execution.",
        preferred="bash" if has_bash else None,
        fallback="sh",
    )
    docs = select_tool(
        use_for="Installing and previewing the docs site.",
        preferred="bun" if has_bun else None,
        fallback="npm" if has_node else None,
    )
    build = select_tool(
        use_for="Build, test, restore, and solution validation.",
        preferred="dotnet" if has_dotnet else None,
        fallback=None,
    )
    # WSL is reported as its own platform family; otherwise use the lowercased
    # OS name from the raw inventory.
    if bool_value(raw, "platform", "wsl"):
        platform_family = "wsl-linux"
    else:
        platform_family = string_value(raw, "platform", "os").lower()
    return {
        "schema_version": 1,
        # Timestamp is regenerated on every run (UTC, second precision).
        "generated_at_utc": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "generated_from": ".ai/environment/tools.raw.yaml",
        "generator": "scripts/generate-ai-environment.py",
        "platform": {
            "family": platform_family,
            "os": string_value(raw, "platform", "os"),
            "distro": string_value(raw, "platform", "distro"),
            "shell": string_value(raw, "platform", "shell"),
        },
        "capabilities": {
            "dotnet": has_dotnet,
            "python": has_python,
            "node": has_node,
            "bun": has_bun,
            "docker": has_docker,
            "fast_search": has_rg,
            "json_cli": has_jq,
        },
        "tool_selection": {
            "search": search,
            "json": json,
            "shell": shell,
            "scripting": scripting,
            "docs_package_manager": docs,
            "build_and_test": build,
        },
        "python": {
            "available": has_python,
            # Mirrors the installed flags of optional helper packages.
            "helper_packages": {
                "requests": bool_value(raw, "python_packages", "requests", "installed"),
                "rich": bool_value(raw, "python_packages", "rich", "installed"),
                "openai": bool_value(raw, "python_packages", "openai", "installed"),
                "tiktoken": bool_value(raw, "python_packages", "tiktoken", "installed"),
                "pydantic": bool_value(raw, "python_packages", "pydantic", "installed"),
                "pytest": bool_value(raw, "python_packages", "pytest", "installed"),
            },
        },
        "preferences": {
            "prefer_project_listed_tools": True,
            "prefer_python_for_non_trivial_automation": has_python,
            "avoid_unlisted_system_tools": True,
        },
        # Fixed guidance strings copied verbatim into tools.ai.yaml.
        "rules": [
            "Use rg instead of grep for repository search when rg is available.",
            "Use jq for JSON inspection; fall back to python3 if jq is unavailable.",
            "Prefer python3 over complex bash for non-trivial scripting when python3 is available.",
            "Use bun for docs preview workflows when bun is available; otherwise fall back to npm.",
            "Use dotnet for repository build and test workflows.",
            "Do not assume unrelated system tools are part of the supported project environment.",
        ],
    }
def emit_yaml(value: Any, indent: int = 0) -> list[str]:
    """Render *value* (nested dicts, lists, scalars) as YAML lines at *indent*.

    Returns the lines without trailing newlines; nesting adds two spaces
    per level and scalars are formatted via format_scalar.
    """
    pad = " " * indent
    if isinstance(value, dict):
        rendered: list[str] = []
        for name, nested in value.items():
            if isinstance(nested, (dict, list)):
                rendered.append(f"{pad}{name}:")
                rendered += emit_yaml(nested, indent + 2)
            else:
                rendered.append(f"{pad}{name}: {format_scalar(nested)}")
        return rendered
    if isinstance(value, list):
        rendered = []
        for entry in value:
            if isinstance(entry, (dict, list)):
                rendered.append(f"{pad}-")
                rendered += emit_yaml(entry, indent + 2)
            else:
                rendered.append(f"{pad}- {format_scalar(entry)}")
        return rendered
    # Bare scalar: a single line with no key or dash prefix.
    return [f"{pad}{format_scalar(value)}"]
def format_scalar(value: Any) -> str:
    """Format a scalar for YAML output.

    Booleans become bare true/false, non-bool ints are emitted unquoted,
    and everything else is stringified and wrapped in double quotes with
    backslashes and quotes escaped.
    """
    # bool must be checked before int: bool is a subclass of int.
    if isinstance(value, bool):
        return "true" if value else "false"
    if isinstance(value, int):
        return str(value)
    # Escape backslashes BEFORE quotes; the previous version escaped only
    # quotes, so a value containing a backslash produced an invalid YAML
    # escape sequence inside the double-quoted scalar.
    text = str(value).replace("\\", "\\\\").replace('"', '\\"')
    return f'"{text}"'
def main() -> None:
    """Regenerate tools.ai.yaml from tools.raw.yaml and report the output path."""
    inventory = build_ai_inventory(parse_simple_yaml(RAW_PATH))
    AI_PATH.parent.mkdir(parents=True, exist_ok=True)
    rendered = "\n".join(emit_yaml(inventory)) + "\n"
    AI_PATH.write_text(rendered, encoding="utf-8")
    print(f"Wrote {AI_PATH}")


if __name__ == "__main__":
    main()