Compare commits

..

100 Commits

Author SHA1 Message Date
世界
275d102587 documentation: Bump version 2026-01-09 22:52:17 +08:00
世界
d553a761a9 Add dial option bind_address_no_port 2026-01-09 22:52:12 +08:00
世界
642719b3b6 platform: Display k based bytes 2026-01-09 22:52:05 +08:00
世界
c325c4ab51 Fix tailscale endpoint 2026-01-09 22:52:05 +08:00
世界
5842fde994 documentation: Bump version 2026-01-08 23:29:31 +08:00
世界
f45291fa79 tailscale: Add system interface support 2026-01-08 23:29:31 +08:00
世界
7f56a3ae8f platform: Fix gomobile build 2026-01-08 23:29:31 +08:00
世界
d5bb1a30dd platform: Update apple build commands 2026-01-08 23:29:31 +08:00
世界
4fbf0c4bd6 platform: Improve interface 2026-01-08 23:29:31 +08:00
世界
762b567693 documentation: Bump version 2026-01-07 15:16:51 +08:00
世界
1a511ae711 Downgrade quic-go to v0.57.1 2026-01-07 15:16:51 +08:00
世界
dcf5fde029 Fix linux musl builds 2026-01-07 15:16:51 +08:00
世界
04949914f1 Fix openwrt builds 2026-01-07 15:16:51 +08:00
世界
fbc0fe43ad Add kmod-nft-queue dependency for openwrt package 2026-01-07 15:16:51 +08:00
世界
857319ef3d Fix nfqueue fallback 2026-01-07 15:16:51 +08:00
世界
b48ddee8b1 Disable multipath TCP by default via GODEBUG 2026-01-07 15:16:51 +08:00
世界
8e3d70d197 Fix missing relay support for Tailscale 2026-01-07 15:16:51 +08:00
世界
21468cda93 cronet: Fix windows DNS hijack 2026-01-07 15:16:51 +08:00
世界
2ff67c9ece Fix DNS transports 2026-01-07 15:16:51 +08:00
世界
6ff279ba49 platform: Expose process info 2026-01-07 15:16:51 +08:00
世界
5d15a241e3 Update bypass action behavior for auto redirect 2026-01-07 15:16:51 +08:00
世界
e01a5c96a5 platform: Add GetStartedAt for StartedService 2026-01-07 15:16:51 +08:00
世界
9d40074cfa documentation: Format changes header 2026-01-07 15:16:51 +08:00
世界
4796bce0c2 Add format_docs command for documentation trailing space formatting 2026-01-07 15:16:51 +08:00
世界
fe632ef3ff Fix panic when closing Box before Start with file log output 2026-01-07 15:16:51 +08:00
世界
b15fecf992 Add pre-match support for auto redirect 2026-01-07 15:16:51 +08:00
世界
774269a2bf Fix cronet on iOS 2026-01-07 15:16:51 +08:00
世界
c995273244 Ignore darwin IP_DONTFRAG error when not supported 2026-01-07 15:15:29 +08:00
世界
a80208e7b1 Update tailscale to v1.92.4 2026-01-07 15:15:29 +08:00
世界
60580df446 Update cronet-go to v143.0.7499.109-1 2026-01-07 15:15:29 +08:00
世界
d28305be5f platform: Split library for Android SDK 21 and 23 2026-01-07 15:15:29 +08:00
世界
886a45a204 Fix missing RootPoolFromContext and TimeFuncFromContext in HTTP clients 2026-01-07 15:15:29 +08:00
世界
401d195944 documentation: Minor fixes 2026-01-07 15:15:29 +08:00
世界
d7489c0653 documentation: Add Wi-Fi state shared page 2026-01-07 15:15:29 +08:00
世界
1d6d829dee Fix missing build constraints for linux wifi state monitor 2026-01-07 15:15:28 +08:00
世界
69555fc6ea Update dependencies 2026-01-07 15:15:28 +08:00
世界
655e534ea8 Update quic-go to v0.58.0 2026-01-07 15:15:28 +08:00
世界
5d86f63906 Add Chrome Root Store certificate option
Adds `chrome` as a new certificate store option alongside `mozilla`.
Both stores filter out China-based CA certificates.
2026-01-07 15:15:28 +08:00
世界
2162dab4a1 Fix cronet-go crash 2026-01-07 15:15:28 +08:00
世界
51fd4b7e1a Add trace logging for lifecycle calls
Log start/close operations with timing information for debugging.
2026-01-07 15:15:28 +08:00
世界
ac51a74dbe documentation: Minor fixes 2026-01-07 15:15:28 +08:00
世界
d8a15ecddf Remove certificate_public_key_sha256 for naive 2026-01-07 15:15:28 +08:00
世界
dda91d50bd platform: Use new crash log api 2026-01-07 15:15:28 +08:00
世界
b3aec2997b Fix naive network 2026-01-07 15:15:27 +08:00
世界
f3ae84442a Add QUIC support for naiveproxy 2026-01-07 15:15:27 +08:00
世界
8d373fb004 Add ECH support for NaiveProxy outbound and tls.ech.query_server_name option
- Enable ECH for NaiveProxy outbound with DNS resolver integration
- Add query_server_name option to override domain for ECH HTTPS record queries
- Update cronet-go dependency and remove windows_386 support
2026-01-07 15:15:27 +08:00
世界
db3c3a6621 Fix naiveproxy build 2026-01-07 15:15:27 +08:00
世界
eaaaddb61e Add OpenAI Codex Multiplexer service 2026-01-07 15:15:27 +08:00
世界
e97784d05b Update pricing for CCM service 2026-01-07 15:15:27 +08:00
世界
7a0d211224 release: Upload only other apks 2026-01-07 15:15:26 +08:00
世界
5264802927 Fix bugs and add UoT option for naiveproxy outbound 2026-01-07 15:15:26 +08:00
世界
69896f8b80 Add naiveproxy outbound 2026-01-07 15:15:26 +08:00
世界
0e3552da6f Apply ping destination filter for Windows 2026-01-07 15:15:26 +08:00
世界
0ed29288a0 platform: Add UsePlatformWIFIMonitor to gRPC interface
Align dev-next-grpc with wip2 by adding UsePlatformWIFIMonitor()
to the new PlatformInterface, allowing platform clients to indicate
they handle WIFI monitoring themselves.
2026-01-07 15:15:26 +08:00
世界
774fed206b daemon: Add clear logs 2026-01-07 15:15:26 +08:00
世界
71e1eb1e7f Revert "Stop using DHCP on iOS and tvOS" 2026-01-07 15:15:25 +08:00
世界
5112488d39 platform: Refactoring libbox to use gRPC-based protocol 2026-01-07 15:15:25 +08:00
世界
f228bd5b49 Add Windows WI-FI state support 2026-01-07 15:15:25 +08:00
世界
6424476ace Add Linux WI-FI state support
Support monitoring WIFI state on Linux through:
- NetworkManager (D-Bus)
- IWD (D-Bus)
- wpa_supplicant (control socket)
- ConnMan (D-Bus)
2026-01-07 15:15:24 +08:00
世界
51dc7c4f88 Add more tcp keep alive options
Also update default TCP keep-alive initial period from 10 minutes to 5 minutes.
2026-01-07 15:15:24 +08:00
世界
92c072d849 Update quic-go to v0.57.1 2026-01-07 15:15:24 +08:00
世界
ac7f0c9a3d Fix read credentials for ccm service 2026-01-07 15:15:24 +08:00
世界
31dcdc4f14 Add claude code multiplexer service 2026-01-07 15:15:23 +08:00
世界
3a267e1557 Fix compatibility with MPTCP 2026-01-07 15:15:23 +08:00
世界
73c5575866 Use a more conservative strategy for resolving with systemd-resolved for local DNS server 2026-01-07 15:15:23 +08:00
世界
f00854dd72 Fix missing mTLS support in client options 2026-01-07 15:15:23 +08:00
世界
c04d1414dd Add curve preferences, pinned public key SHA256 and mTLS for TLS options 2026-01-07 15:15:23 +08:00
世界
ec4e411fa7 Fix WireGuard input packet 2026-01-07 15:15:23 +08:00
世界
cc3a4c7dec Update tfo-go to latest 2026-01-07 15:15:22 +08:00
世界
20b0e4c3a0 Remove compatibility codes 2026-01-07 15:15:22 +08:00
世界
5aabc42823 Do not use linkname by default to simplify debugging 2026-01-07 15:15:22 +08:00
世界
f0771fb623 documentation: Update chinese translations 2026-01-07 15:15:22 +08:00
世界
67c79ebac8 Update quic-go to v0.55.0 2026-01-07 15:15:22 +08:00
世界
ef185fed09 Update WireGuard and Tailscale 2026-01-07 15:15:22 +08:00
世界
f8cdc40d62 Fix preConnectionCopy 2026-01-07 15:15:22 +08:00
世界
3bcf072750 Fix ping domain 2026-01-07 15:15:22 +08:00
世界
3ef15fa8aa release: Fix linux build 2026-01-07 15:15:21 +08:00
世界
159418324a Improve ktls rx error handling 2026-01-07 15:15:21 +08:00
世界
0c30128c44 Improve compatibility for kTLS 2026-01-07 15:15:21 +08:00
世界
354330cd80 ktls: Add warning for inappropriate scenarios 2026-01-07 15:15:21 +08:00
世界
b991ae1c91 Add support for kTLS
Reference: https://gitlab.com/go-extension/tls
2026-01-07 15:15:21 +08:00
世界
53fef79ab3 Add proxy support for ICMP echo request 2026-01-07 15:15:21 +08:00
世界
dfe2e083d5 Fix resolve using resolved 2026-01-07 15:15:21 +08:00
世界
ce408b2989 documentation: Update behavior of local DNS server on darwin 2026-01-07 15:15:21 +08:00
世界
395a1aadfa Remove use of ldflags -checklinkname=0 on darwin 2026-01-07 15:15:20 +08:00
世界
905573e386 Fix legacy DNS config 2026-01-07 15:15:20 +08:00
世界
5814753603 Fix rule-set format 2026-01-07 15:15:20 +08:00
世界
8cef967847 documentation: Remove outdated icons 2026-01-07 15:15:20 +08:00
世界
af532b8d05 documentation: Improve local DNS server 2026-01-07 15:15:20 +08:00
世界
5585855367 Stop using DHCP on iOS and tvOS
We do not have the `com.apple.developer.networking.multicast` entitlement and are unable to obtain it for non-technical reasons.
2026-01-07 15:15:20 +08:00
世界
2344b35b0f Improve local DNS server on darwin
We mistakenly believed that `libresolv`'s `search` function worked correctly in NetworkExtension, but it seems only `getaddrinfo` does.

This commit changes the behavior of the `local` DNS server in NetworkExtension to prefer DHCP, falling back to `getaddrinfo` if DHCP servers are unavailable.

It's worth noting that `prefer_go` does not disable DHCP since it respects Dial Fields, but `getaddrinfo` does the opposite. The new behavior only applies to NetworkExtension, not to all scenarios (primarily command-line binaries) as it did previously.

In addition, this commit also improves the DHCP DNS server to use the same robust query logic as `local`.
2026-01-07 15:15:20 +08:00
世界
3a9ae0f91f Use resolved in local DNS server if available 2026-01-07 15:15:20 +08:00
xchacha20-poly1305
4fdbf786e1 Fix rule set version 2026-01-07 15:15:19 +08:00
世界
1e3ea29602 documentation: Add preferred_by route rule item 2026-01-07 15:15:19 +08:00
世界
ac5cb50368 Add preferred_by route rule item 2026-01-07 15:15:19 +08:00
世界
722f9ee971 documentation: Add interface address rule items 2026-01-07 15:15:19 +08:00
世界
3df72d04e9 Add interface address rule items 2026-01-07 15:15:19 +08:00
世界
4f93a515d6 Fix ECH retry support 2026-01-07 15:15:19 +08:00
neletor
a3c9ceca60 Add support for ech retry configs 2026-01-07 15:15:19 +08:00
Zephyruso
a57bdc82e8 Add /dns/flush-clash meta api 2026-01-07 15:15:18 +08:00
106 changed files with 1878 additions and 5558 deletions

View File

@@ -1,23 +0,0 @@
-s dir
--name sing-box
--category net
--license GPL-3.0-or-later
--description "The universal proxy platform."
--url "https://sing-box.sagernet.org/"
--maintainer "nekohasekai <contact-git@sekai.icu>"
--config-files etc/sing-box/config.json
--after-install release/config/sing-box.postinst
release/config/config.json=/etc/sing-box/config.json
release/config/sing-box.service=/usr/lib/systemd/system/sing-box.service
release/config/sing-box@.service=/usr/lib/systemd/system/sing-box@.service
release/config/sing-box.sysusers=/usr/lib/sysusers.d/sing-box.conf
release/config/sing-box.rules=usr/share/polkit-1/rules.d/sing-box.rules
release/config/sing-box-split-dns.xml=/usr/share/dbus-1/system.d/sing-box-split-dns.conf
release/completions/sing-box.bash=/usr/share/bash-completion/completions/sing-box.bash
release/completions/sing-box.fish=/usr/share/fish/vendor_completions.d/sing-box.fish
release/completions/sing-box.zsh=/usr/share/zsh/site-functions/_sing-box
LICENSE=/usr/share/licenses/sing-box/LICENSE

View File

@@ -1 +1 @@
17c7ef18afa63b205e835c6270277b29382eb8e3
92d4602aba0ab6084673af0fe4887dccbc1049a5

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
VERSION="1.25.7"
VERSION="1.25.5"
mkdir -p $HOME/go
cd $HOME/go

View File

@@ -10,4 +10,4 @@ git -C $PROJECTS/cronet-go fetch origin go
go get -x github.com/sagernet/cronet-go/all@$(git -C $PROJECTS/cronet-go rev-parse origin/go)
go get -x github.com/sagernet/cronet-go@$(git -C $PROJECTS/cronet-go rev-parse origin/go)
go mod tidy
git -C $PROJECTS/cronet-go rev-parse origin/go > "$SCRIPT_DIR/CRONET_GO_VERSION"
git -C $PROJECTS/cronet-go rev-parse origin/HEAD > "$SCRIPT_DIR/CRONET_GO_VERSION"

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -e -o pipefail
SCRIPT_DIR=$(dirname "$0")
PROJECTS=$SCRIPT_DIR/../..
git -C $PROJECTS/cronet-go fetch origin dev
git -C $PROJECTS/cronet-go fetch origin go_dev
go get -x github.com/sagernet/cronet-go/all@$(git -C $PROJECTS/cronet-go rev-parse origin/go_dev)
go get -x github.com/sagernet/cronet-go@$(git -C $PROJECTS/cronet-go rev-parse origin/go_dev)
go mod tidy
git -C $PROJECTS/cronet-go rev-parse origin/dev > "$SCRIPT_DIR/CRONET_GO_VERSION"

View File

@@ -46,7 +46,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Check input version
if: github.event_name == 'workflow_dispatch'
run: |-
@@ -85,27 +85,19 @@ jobs:
- { os: linux, arch: arm, variant: glibc, naive: true, goarm: "7" }
- { os: linux, arch: arm, variant: musl, naive: true, goarm: "7", debian: armhf, rpm: armv7hl, pacman: armv7hl, openwrt: "arm_cortex-a5_vfpv4 arm_cortex-a7_neon-vfpv4 arm_cortex-a7_vfpv4 arm_cortex-a8_vfpv3 arm_cortex-a9_neon arm_cortex-a9_vfpv3-d16 arm_cortex-a15_neon-vfpv4" }
- { os: linux, arch: mipsle, gomips: hardfloat, naive: true, variant: glibc }
- { os: linux, arch: mipsle, gomips: softfloat, naive: true, variant: musl, debian: mipsel, rpm: mipsel, openwrt: "mipsel_24kc mipsel_74kc mipsel_mips32" }
- { os: linux, arch: mips64le, gomips: hardfloat, naive: true, variant: glibc, debian: mips64el, rpm: mips64el }
- { os: linux, arch: riscv64, naive: true, variant: glibc }
- { os: linux, arch: riscv64, naive: true, variant: musl, debian: riscv64, rpm: riscv64, openwrt: "riscv64_generic" }
- { os: linux, arch: loong64, naive: true, variant: glibc }
- { os: linux, arch: loong64, naive: true, variant: musl, debian: loongarch64, rpm: loongarch64, openwrt: "loongarch64_generic" }
- { os: linux, arch: "386", go386: softfloat, openwrt: "i386_pentium-mmx" }
- { os: linux, arch: arm, goarm: "5", openwrt: "arm_arm926ej-s arm_cortex-a7 arm_cortex-a9 arm_fa526 arm_xscale" }
- { os: linux, arch: arm, goarm: "6", debian: armel, rpm: armv6hl, openwrt: "arm_arm1176jzf-s_vfp" }
- { os: linux, arch: mips, gomips: softfloat, openwrt: "mips_24kc mips_4kec mips_mips32" }
- { os: linux, arch: mipsle, gomips: hardfloat, openwrt: "mipsel_24kc_24kf" }
- { os: linux, arch: mipsle, gomips: softfloat }
- { os: linux, arch: mipsle, gomips: hardfloat, debian: mipsel, rpm: mipsel, openwrt: "mipsel_24kc_24kf" }
- { os: linux, arch: mipsle, gomips: softfloat, openwrt: "mipsel_24kc mipsel_74kc mipsel_mips32" }
- { os: linux, arch: mips64, gomips: softfloat, openwrt: "mips64_mips64r2 mips64_octeonplus" }
- { os: linux, arch: mips64le, gomips: hardfloat }
- { os: linux, arch: mips64le, gomips: hardfloat, debian: mips64el, rpm: mips64el }
- { os: linux, arch: mips64le, gomips: softfloat, openwrt: "mips64el_mips64r2" }
- { os: linux, arch: s390x, debian: s390x, rpm: s390x }
- { os: linux, arch: ppc64le, debian: ppc64el, rpm: ppc64le }
- { os: linux, arch: riscv64 }
- { os: linux, arch: loong64 }
- { os: linux, arch: riscv64, debian: riscv64, rpm: riscv64, openwrt: "riscv64_generic" }
- { os: linux, arch: loong64, debian: loongarch64, rpm: loongarch64, openwrt: "loongarch64_generic" }
- { os: windows, arch: amd64, legacy_win7: true, legacy_name: "windows-7" }
- { os: windows, arch: "386", legacy_win7: true, legacy_name: "windows-7" }
@@ -123,7 +115,7 @@ jobs:
if: ${{ ! (matrix.legacy_win7 || matrix.legacy_go124) }}
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Setup Go 1.24
if: matrix.legacy_go124
uses: actions/setup-go@v5
@@ -162,23 +154,14 @@ jobs:
git -C ~/cronet-go fetch --depth=1 origin "$CRONET_GO_VERSION"
git -C ~/cronet-go checkout FETCH_HEAD
git -C ~/cronet-go submodule update --init --recursive --depth=1
- name: Regenerate Debian keyring
if: matrix.naive
run: |
set -xeuo pipefail
rm -f ~/cronet-go/naiveproxy/src/build/linux/sysroot_scripts/keyring.gpg
cd ~/cronet-go
GPG_TTY=/dev/null ./naiveproxy/src/build/linux/sysroot_scripts/generate_keyring.sh
- name: Cache Chromium toolchain
if: matrix.naive
id: cache-chromium-toolchain
uses: actions/cache@v4
with:
path: |
~/cronet-go/naiveproxy/src/third_party/llvm-build/
~/cronet-go/naiveproxy/src/gn/out/
~/cronet-go/naiveproxy/src/chrome/build/pgo_profiles/
~/cronet-go/naiveproxy/src/out/sysroot-build/
~/cronet-go/naiveproxy/src/third_party/llvm-build/Release+Asserts
~/cronet-go/naiveproxy/src/out/sysroot-build
key: chromium-toolchain-${{ matrix.arch }}-${{ matrix.variant }}-${{ hashFiles('.github/CRONET_GO_VERSION') }}
- name: Download Chromium toolchain
if: matrix.naive
@@ -253,8 +236,6 @@ jobs:
GOARCH: ${{ matrix.arch }}
GO386: ${{ matrix.go386 }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
GOMIPS64: ${{ matrix.gomips }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build (musl)
if: matrix.variant == 'musl'
@@ -270,8 +251,6 @@ jobs:
GOARCH: ${{ matrix.arch }}
GO386: ${{ matrix.go386 }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
GOMIPS64: ${{ matrix.gomips }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build (non-variant)
if: matrix.os != 'android' && matrix.variant == ''
@@ -373,7 +352,7 @@ jobs:
sudo gem install fpm
sudo apt-get update
sudo apt-get install -y libarchive-tools
cp .fpm_pacman .fpm
cp .fpm_systemd .fpm
fpm -t pacman \
-v "$PKG_VERSION" \
-p "dist/sing-box_${{ needs.calculate_version.outputs.version }}_${{ matrix.os }}_${{ matrix.pacman }}.pkg.tar.zst" \
@@ -592,7 +571,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Setup Android NDK
id: setup-ndk
uses: nttld/setup-ndk@v1
@@ -682,7 +661,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Setup Android NDK
id: setup-ndk
uses: nttld/setup-ndk@v1
@@ -781,7 +760,7 @@ jobs:
if: matrix.if
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Set tag
if: matrix.if
run: |-

View File

@@ -29,12 +29,10 @@ jobs:
- { arch: arm64, naive: true, docker_platform: "linux/arm64" }
- { arch: "386", naive: true, docker_platform: "linux/386" }
- { arch: arm, goarm: "7", naive: true, docker_platform: "linux/arm/v7" }
- { arch: mipsle, gomips: softfloat, naive: true, docker_platform: "linux/mipsle" }
- { arch: riscv64, naive: true, docker_platform: "linux/riscv64" }
- { arch: loong64, naive: true, docker_platform: "linux/loong64" }
# Non-naive builds
- { arch: arm, goarm: "6", docker_platform: "linux/arm/v6" }
- { arch: ppc64le, docker_platform: "linux/ppc64le" }
- { arch: riscv64, docker_platform: "linux/riscv64" }
- { arch: s390x, docker_platform: "linux/s390x" }
steps:
- name: Get commit to build
@@ -55,7 +53,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.4
- name: Clone cronet-go
if: matrix.naive
run: |
@@ -66,23 +64,14 @@ jobs:
git -C ~/cronet-go fetch --depth=1 origin "$CRONET_GO_VERSION"
git -C ~/cronet-go checkout FETCH_HEAD
git -C ~/cronet-go submodule update --init --recursive --depth=1
- name: Regenerate Debian keyring
if: matrix.naive
run: |
set -xeuo pipefail
rm -f ~/cronet-go/naiveproxy/src/build/linux/sysroot_scripts/keyring.gpg
cd ~/cronet-go
GPG_TTY=/dev/null ./naiveproxy/src/build/linux/sysroot_scripts/generate_keyring.sh
- name: Cache Chromium toolchain
if: matrix.naive
id: cache-chromium-toolchain
uses: actions/cache@v4
with:
path: |
~/cronet-go/naiveproxy/src/third_party/llvm-build/
~/cronet-go/naiveproxy/src/gn/out/
~/cronet-go/naiveproxy/src/chrome/build/pgo_profiles/
~/cronet-go/naiveproxy/src/out/sysroot-build/
~/cronet-go/naiveproxy/src/third_party/llvm-build/Release+Asserts
~/cronet-go/naiveproxy/src/out/sysroot-build
key: chromium-toolchain-${{ matrix.arch }}-musl-${{ hashFiles('.github/CRONET_GO_VERSION') }}
- name: Download Chromium toolchain
if: matrix.naive
@@ -121,7 +110,6 @@ jobs:
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
- name: Build (non-naive)
if: ${{ ! matrix.naive }}
run: |
@@ -160,17 +148,15 @@ jobs:
strategy:
fail-fast: true
matrix:
include:
- { platform: "linux/amd64" }
- { platform: "linux/arm/v6" }
- { platform: "linux/arm/v7" }
- { platform: "linux/arm64" }
- { platform: "linux/386" }
# mipsle: no base Docker image available for this platform
- { platform: "linux/ppc64le" }
- { platform: "linux/riscv64" }
- { platform: "linux/s390x" }
- { platform: "linux/loong64", base_image: "ghcr.io/loong64/alpine:edge" }
platform:
- linux/amd64
- linux/arm/v6
- linux/arm/v7
- linux/arm64
- linux/386
- linux/ppc64le
- linux/riscv64
- linux/s390x
steps:
- name: Get commit to build
id: ref
@@ -223,8 +209,6 @@ jobs:
platforms: ${{ matrix.platform }}
context: .
file: Dockerfile.binary
build-args: |
BASE_IMAGE=${{ matrix.base_image || 'alpine' }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
- name: Export digest
@@ -240,7 +224,6 @@ jobs:
if-no-files-found: error
retention-days: 1
merge:
if: github.event_name != 'push'
runs-on: ubuntu-latest
needs:
- build_docker

View File

@@ -34,7 +34,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Check input version
if: github.event_name == 'workflow_dispatch'
run: |-
@@ -61,14 +61,14 @@ jobs:
- { os: linux, arch: arm64, naive: true, debian: arm64, rpm: aarch64, pacman: aarch64 }
- { os: linux, arch: "386", naive: true, debian: i386, rpm: i386 }
- { os: linux, arch: arm, goarm: "7", naive: true, debian: armhf, rpm: armv7hl, pacman: armv7hl }
- { os: linux, arch: mipsle, gomips: softfloat, naive: true, debian: mipsel, rpm: mipsel }
- { os: linux, arch: riscv64, naive: true, debian: riscv64, rpm: riscv64 }
- { os: linux, arch: loong64, naive: true, debian: loongarch64, rpm: loongarch64 }
# Non-naive builds (unsupported architectures)
- { os: linux, arch: arm, goarm: "6", debian: armel, rpm: armv6hl }
- { os: linux, arch: mips64le, debian: mips64el, rpm: mips64el }
- { os: linux, arch: mipsle, debian: mipsel, rpm: mipsel }
- { os: linux, arch: s390x, debian: s390x, rpm: s390x }
- { os: linux, arch: ppc64le, debian: ppc64el, rpm: ppc64le }
- { os: linux, arch: riscv64, debian: riscv64, rpm: riscv64 }
- { os: linux, arch: loong64, debian: loongarch64, rpm: loongarch64 }
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
@@ -77,7 +77,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.7
go-version: ^1.25.5
- name: Clone cronet-go
if: matrix.naive
run: |
@@ -88,23 +88,14 @@ jobs:
git -C ~/cronet-go fetch --depth=1 origin "$CRONET_GO_VERSION"
git -C ~/cronet-go checkout FETCH_HEAD
git -C ~/cronet-go submodule update --init --recursive --depth=1
- name: Regenerate Debian keyring
if: matrix.naive
run: |
set -xeuo pipefail
rm -f ~/cronet-go/naiveproxy/src/build/linux/sysroot_scripts/keyring.gpg
cd ~/cronet-go
GPG_TTY=/dev/null ./naiveproxy/src/build/linux/sysroot_scripts/generate_keyring.sh
- name: Cache Chromium toolchain
if: matrix.naive
id: cache-chromium-toolchain
uses: actions/cache@v4
with:
path: |
~/cronet-go/naiveproxy/src/third_party/llvm-build/
~/cronet-go/naiveproxy/src/gn/out/
~/cronet-go/naiveproxy/src/chrome/build/pgo_profiles/
~/cronet-go/naiveproxy/src/out/sysroot-build/
~/cronet-go/naiveproxy/src/third_party/llvm-build/Release+Asserts
~/cronet-go/naiveproxy/src/out/sysroot-build
key: chromium-toolchain-${{ matrix.arch }}-musl-${{ hashFiles('.github/CRONET_GO_VERSION') }}
- name: Download Chromium toolchain
if: matrix.naive
@@ -143,8 +134,6 @@ jobs:
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
GOMIPS64: ${{ matrix.gomips }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build (non-naive)
if: ${{ ! matrix.naive }}

3
.gitignore vendored
View File

@@ -12,9 +12,6 @@
/*.jar
/*.aar
/*.xcframework/
/experimental/libbox/*.aar
/experimental/libbox/*.xcframework/
/experimental/libbox/*.nupkg
.DS_Store
/config.d/
/venv/

View File

@@ -1,14 +1,8 @@
ARG BASE_IMAGE=alpine
FROM ${BASE_IMAGE}
FROM alpine
ARG TARGETARCH
ARG TARGETVARIANT
LABEL maintainer="nekohasekai <contact-git@sekai.icu>"
RUN set -ex \
&& if command -v apk > /dev/null; then \
apk add --no-cache --upgrade bash tzdata ca-certificates nftables; \
else \
apt-get update && apt-get install -y --no-install-recommends bash tzdata ca-certificates nftables \
&& rm -rf /var/lib/apt/lists/*; \
fi
&& apk add --no-cache --upgrade bash tzdata ca-certificates nftables
COPY sing-box-${TARGETARCH}${TARGETVARIANT} /usr/local/bin/sing-box
ENTRYPOINT ["sing-box"]

View File

@@ -10,8 +10,6 @@ PARAMS = -v -trimpath -ldflags "-X 'github.com/sagernet/sing-box/constant.Versio
MAIN_PARAMS = $(PARAMS) -tags "$(TAGS)"
MAIN = ./cmd/sing-box
PREFIX ?= $(shell go env GOPATH)
SING_FFI ?= sing-ffi
LIBBOX_FFI_CONFIG ?= ./experimental/libbox/ffi.json
.PHONY: test release docs build
@@ -91,12 +89,12 @@ update_android_version:
go run ./cmd/internal/update_android_version
build_android:
cd ../sing-box-for-android && ./gradlew :app:clean :app:assembleOtherRelease :app:assembleOtherLegacyRelease && ./gradlew --stop
cd ../sing-box-for-android && ./gradlew :app:clean :app:assemblePlayRelease :app:assembleOtherRelease && ./gradlew --stop
upload_android:
mkdir -p dist/release_android
cp ../sing-box-for-android/app/build/outputs/apk/other/release/*.apk dist/release_android
cp ../sing-box-for-android/app/build/outputs/apk/otherLegacy/release/*.apk dist/release_android
cp ../sing-box-for-android/app/build/outputs/apk/play/release/*.apk dist/release_android
cp ../sing-box-for-android/app/build/outputs/apk/other/release/*-universal.apk dist/release_android
ghr --replace --draft --prerelease -p 5 "v${VERSION}" dist/release_android
rm -rf dist/release_android
@@ -208,12 +206,12 @@ update_apple_version:
update_macos_version:
MACOS_PROJECT_VERSION=$(shell go run -v ./cmd/internal/app_store_connect next_macos_project_version) go run ./cmd/internal/update_apple_version
release_apple: lib_apple update_apple_version release_ios release_macos release_tvos release_macos_standalone
release_apple: lib_ios update_apple_version release_ios release_macos release_tvos release_macos_standalone
release_apple_beta: update_apple_version release_ios release_macos release_tvos
publish_testflight:
go run -v ./cmd/internal/app_store_connect publish_testflight $(filter-out $@,$(MAKECMDGOALS))
go run -v ./cmd/internal/app_store_connect publish_testflight
prepare_app_store:
go run -v ./cmd/internal/app_store_connect prepare_app_store
@@ -236,21 +234,22 @@ test_stdio:
lib_android:
go run ./cmd/internal/build_libbox -target android
lib_android_debug:
go run ./cmd/internal/build_libbox -target android -debug
lib_apple:
go run ./cmd/internal/build_libbox -target apple
lib_windows:
$(SING_FFI) generate --config $(LIBBOX_FFI_CONFIG) --platform-type csharp
lib_ios:
go run ./cmd/internal/build_libbox -target apple -platform ios -debug
lib_android_new:
$(SING_FFI) generate --config $(LIBBOX_FFI_CONFIG) --platform-type android
lib_apple_new:
$(SING_FFI) generate --config $(LIBBOX_FFI_CONFIG) --platform-type apple
lib:
go run ./cmd/internal/build_libbox -target android
go run ./cmd/internal/build_libbox -target ios
lib_install:
go install -v github.com/sagernet/gomobile/cmd/gomobile@v0.1.12
go install -v github.com/sagernet/gomobile/cmd/gobind@v0.1.12
go install -v github.com/sagernet/gomobile/cmd/gomobile@v0.1.11
go install -v github.com/sagernet/gomobile/cmd/gobind@v0.1.11
docs:
venv/bin/mkdocs serve
@@ -270,6 +269,3 @@ update:
git fetch
git reset FETCH_HEAD --hard
git clean -fdx
%:
@:

View File

@@ -9,10 +9,6 @@ import (
type ConnectionManager interface {
Lifecycle
Count() int
CloseAll()
TrackConn(conn net.Conn) net.Conn
TrackPacketConn(conn net.PacketConn) net.PacketConn
NewConnection(ctx context.Context, this N.Dialer, conn net.Conn, metadata InboundContext, onClose N.CloseHandlerFunc)
NewPacketConnection(ctx context.Context, this N.Dialer, conn N.PacketConn, metadata InboundContext, onClose N.CloseHandlerFunc)
}

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/binary"
"io"
"time"
"github.com/sagernet/sing/common/observable"
@@ -69,11 +68,7 @@ func (s *SavedBinary) MarshalBinary() ([]byte, error) {
if err != nil {
return nil, err
}
_, err = varbin.WriteUvarint(&buffer, uint64(len(s.Content)))
if err != nil {
return nil, err
}
_, err = buffer.Write(s.Content)
err = varbin.Write(&buffer, binary.BigEndian, s.Content)
if err != nil {
return nil, err
}
@@ -81,11 +76,7 @@ func (s *SavedBinary) MarshalBinary() ([]byte, error) {
if err != nil {
return nil, err
}
_, err = varbin.WriteUvarint(&buffer, uint64(len(s.LastEtag)))
if err != nil {
return nil, err
}
_, err = buffer.WriteString(s.LastEtag)
err = varbin.Write(&buffer, binary.BigEndian, s.LastEtag)
if err != nil {
return nil, err
}
@@ -99,12 +90,7 @@ func (s *SavedBinary) UnmarshalBinary(data []byte) error {
if err != nil {
return err
}
contentLength, err := binary.ReadUvarint(reader)
if err != nil {
return err
}
s.Content = make([]byte, contentLength)
_, err = io.ReadFull(reader, s.Content)
err = varbin.Read(reader, binary.BigEndian, &s.Content)
if err != nil {
return err
}
@@ -114,16 +100,10 @@ func (s *SavedBinary) UnmarshalBinary(data []byte) error {
return err
}
s.LastUpdated = time.Unix(lastUpdated, 0)
etagLength, err := binary.ReadUvarint(reader)
err = varbin.Read(reader, binary.BigEndian, &s.LastEtag)
if err != nil {
return err
}
etagBytes := make([]byte, etagLength)
_, err = io.ReadFull(reader, etagBytes)
if err != nil {
return err
}
s.LastEtag = string(etagBytes)
return nil
}

5
box.go
View File

@@ -125,10 +125,7 @@ func New(options Options) (*Box, error) {
ctx = pause.WithDefaultManager(ctx)
experimentalOptions := common.PtrValueOrDefault(options.Experimental)
err := applyDebugOptions(common.PtrValueOrDefault(experimentalOptions.Debug))
if err != nil {
return nil, err
}
applyDebugOptions(common.PtrValueOrDefault(experimentalOptions.Debug))
var needCacheFile bool
var needClashAPI bool
var needV2RayAPI bool

View File

@@ -100,32 +100,11 @@ findVersion:
}
func publishTestflight(ctx context.Context) error {
if len(os.Args) < 3 {
return E.New("platform required: ios, macos, or tvos")
}
var platform asc.Platform
switch os.Args[2] {
case "ios":
platform = asc.PlatformIOS
case "macos":
platform = asc.PlatformMACOS
case "tvos":
platform = asc.PlatformTVOS
default:
return E.New("unknown platform: ", os.Args[2])
}
tagVersion, err := build_shared.ReadTagVersion()
if err != nil {
return err
}
tag := tagVersion.VersionString()
releaseNotes := F.ToString("sing-box ", tagVersion.String())
if len(os.Args) >= 4 {
releaseNotes = strings.Join(os.Args[3:], " ")
}
client := createClient(20 * time.Minute)
log.Info(tag, " list build IDs")
@@ -136,76 +115,97 @@ func publishTestflight(ctx context.Context) error {
buildIDs := common.Map(buildIDsResponse.Data, func(it asc.RelationshipData) string {
return it.ID
})
var platforms []asc.Platform
if len(os.Args) == 3 {
switch os.Args[2] {
case "ios":
platforms = []asc.Platform{asc.PlatformIOS}
case "macos":
platforms = []asc.Platform{asc.PlatformMACOS}
case "tvos":
platforms = []asc.Platform{asc.PlatformTVOS}
default:
return E.New("unknown platform: ", os.Args[2])
}
} else {
platforms = []asc.Platform{
asc.PlatformIOS,
asc.PlatformMACOS,
asc.PlatformTVOS,
}
}
waitingForProcess := false
log.Info(string(platform), " list builds")
for {
builds, _, err := client.Builds.ListBuilds(ctx, &asc.ListBuildsQuery{
FilterApp: []string{appID},
FilterPreReleaseVersionPlatform: []string{string(platform)},
})
if err != nil {
return err
}
build := builds.Data[0]
log.Info(string(platform), " ", tag, " found build: ", build.ID, " (", *build.Attributes.Version, ")")
if !waitingForProcess && (common.Contains(buildIDs, build.ID) || time.Since(build.Attributes.UploadedDate.Time) > 30*time.Minute) {
log.Info(string(platform), " ", tag, " waiting for process")
time.Sleep(15 * time.Second)
continue
}
if *build.Attributes.ProcessingState != "VALID" {
waitingForProcess = true
log.Info(string(platform), " ", tag, " waiting for process: ", *build.Attributes.ProcessingState)
time.Sleep(15 * time.Second)
continue
}
log.Info(string(platform), " ", tag, " list localizations")
localizations, _, err := client.TestFlight.ListBetaBuildLocalizationsForBuild(ctx, build.ID, nil)
if err != nil {
return err
}
localization := common.Find(localizations.Data, func(it asc.BetaBuildLocalization) bool {
return *it.Attributes.Locale == "en-US"
})
if localization.ID == "" {
log.Fatal(string(platform), " ", tag, " no en-US localization found")
}
if localization.Attributes == nil || localization.Attributes.WhatsNew == nil || *localization.Attributes.WhatsNew == "" {
log.Info(string(platform), " ", tag, " update localization")
_, _, err = client.TestFlight.UpdateBetaBuildLocalization(ctx, localization.ID, common.Ptr(releaseNotes))
for _, platform := range platforms {
log.Info(string(platform), " list builds")
for {
builds, _, err := client.Builds.ListBuilds(ctx, &asc.ListBuildsQuery{
FilterApp: []string{appID},
FilterPreReleaseVersionPlatform: []string{string(platform)},
})
if err != nil {
return err
}
}
log.Info(string(platform), " ", tag, " publish")
response, err := client.TestFlight.AddBuildsToBetaGroup(ctx, groupID, []string{build.ID})
if response != nil && (response.StatusCode == http.StatusUnprocessableEntity || response.StatusCode == http.StatusNotFound) {
log.Info("waiting for process")
time.Sleep(15 * time.Second)
continue
} else if err != nil {
return err
}
log.Info(string(platform), " ", tag, " list submissions")
betaSubmissions, _, err := client.TestFlight.ListBetaAppReviewSubmissions(ctx, &asc.ListBetaAppReviewSubmissionsQuery{
FilterBuild: []string{build.ID},
})
if err != nil {
return err
}
if len(betaSubmissions.Data) == 0 {
log.Info(string(platform), " ", tag, " create submission")
_, _, err = client.TestFlight.CreateBetaAppReviewSubmission(ctx, build.ID)
build := builds.Data[0]
if !waitingForProcess && (common.Contains(buildIDs, build.ID) || time.Since(build.Attributes.UploadedDate.Time) > 30*time.Minute) {
log.Info(string(platform), " ", tag, " waiting for process")
time.Sleep(15 * time.Second)
continue
}
if *build.Attributes.ProcessingState != "VALID" {
waitingForProcess = true
log.Info(string(platform), " ", tag, " waiting for process: ", *build.Attributes.ProcessingState)
time.Sleep(15 * time.Second)
continue
}
log.Info(string(platform), " ", tag, " list localizations")
localizations, _, err := client.TestFlight.ListBetaBuildLocalizationsForBuild(ctx, build.ID, nil)
if err != nil {
if strings.Contains(err.Error(), "ANOTHER_BUILD_IN_REVIEW") {
log.Error(err)
break
return err
}
localization := common.Find(localizations.Data, func(it asc.BetaBuildLocalization) bool {
return *it.Attributes.Locale == "en-US"
})
if localization.ID == "" {
log.Fatal(string(platform), " ", tag, " no en-US localization found")
}
if localization.Attributes == nil || localization.Attributes.WhatsNew == nil || *localization.Attributes.WhatsNew == "" {
log.Info(string(platform), " ", tag, " update localization")
_, _, err = client.TestFlight.UpdateBetaBuildLocalization(ctx, localization.ID, common.Ptr(
F.ToString("sing-box ", tagVersion.String()),
))
if err != nil {
return err
}
}
log.Info(string(platform), " ", tag, " publish")
response, err := client.TestFlight.AddBuildsToBetaGroup(ctx, groupID, []string{build.ID})
if response != nil && (response.StatusCode == http.StatusUnprocessableEntity || response.StatusCode == http.StatusNotFound) {
log.Info("waiting for process")
time.Sleep(15 * time.Second)
continue
} else if err != nil {
return err
}
log.Info(string(platform), " ", tag, " list submissions")
betaSubmissions, _, err := client.TestFlight.ListBetaAppReviewSubmissions(ctx, &asc.ListBetaAppReviewSubmissionsQuery{
FilterBuild: []string{build.ID},
})
if err != nil {
return err
}
if len(betaSubmissions.Data) == 0 {
log.Info(string(platform), " ", tag, " create submission")
_, _, err = client.TestFlight.CreateBetaAppReviewSubmission(ctx, build.ID)
if err != nil {
if strings.Contains(err.Error(), "ANOTHER_BUILD_IN_REVIEW") {
log.Error(err)
break
}
return err
}
}
break
}
break
}
return nil
}

View File

@@ -17,17 +17,17 @@ import (
)
var (
debugEnabled bool
target string
platform string
// withTailscale bool
debugEnabled bool
target string
platform string
withTailscale bool
)
func init() {
flag.BoolVar(&debugEnabled, "debug", false, "enable debug")
flag.StringVar(&target, "target", "android", "target platform")
flag.StringVar(&platform, "platform", "", "specify platform")
// flag.BoolVar(&withTailscale, "with-tailscale", false, "build tailscale for iOS and tvOS")
flag.BoolVar(&withTailscale, "with-tailscale", false, "build tailscale for iOS and tvOS")
}
func main() {
@@ -48,7 +48,7 @@ var (
debugFlags []string
sharedTags []string
darwinTags []string
// memcTags []string
memcTags []string
notMemcTags []string
debugTags []string
)
@@ -63,10 +63,9 @@ func init() {
sharedFlags = append(sharedFlags, "-ldflags", "-X github.com/sagernet/sing-box/constant.Version="+currentTag+" -X internal/godebug.defaultGODEBUG=multipathtcp=0 -s -w -buildid= -checklinkname=0")
debugFlags = append(debugFlags, "-ldflags", "-X github.com/sagernet/sing-box/constant.Version="+currentTag+" -X internal/godebug.defaultGODEBUG=multipathtcp=0 -checklinkname=0")
sharedTags = append(sharedTags, "with_gvisor", "with_quic", "with_wireguard", "with_utls", "with_naive_outbound", "with_clash_api", "badlinkname", "tfogo_checklinkname0")
darwinTags = append(darwinTags, "with_dhcp", "grpcnotrace")
// memcTags = append(memcTags, "with_tailscale")
sharedTags = append(sharedTags, "with_tailscale", "ts_omit_logtail", "ts_omit_ssh", "ts_omit_drive", "ts_omit_taildrop", "ts_omit_webclient", "ts_omit_doctor", "ts_omit_capture", "ts_omit_kube", "ts_omit_aws", "ts_omit_synology", "ts_omit_bird")
sharedTags = append(sharedTags, "with_gvisor", "with_quic", "with_wireguard", "with_utls", "with_naive_outbound", "with_clash_api", "with_conntrack", "badlinkname", "tfogo_checklinkname0")
darwinTags = append(darwinTags, "with_dhcp")
memcTags = append(memcTags, "with_tailscale")
notMemcTags = append(notMemcTags, "with_low_memory")
debugTags = append(debugTags, "debug")
}
@@ -165,7 +164,7 @@ func buildAndroid() {
// Build main variant (SDK 23)
mainTags := append([]string{}, sharedTags...)
// mainTags = append(mainTags, memcTags...)
mainTags = append(mainTags, memcTags...)
if debugEnabled {
mainTags = append(mainTags, debugTags...)
}
@@ -177,7 +176,7 @@ func buildAndroid() {
// Build legacy variant (SDK 21, no naive outbound)
legacyTags := filterTags(sharedTags, "with_naive_outbound")
// legacyTags = append(legacyTags, memcTags...)
legacyTags = append(legacyTags, memcTags...)
if debugEnabled {
legacyTags = append(legacyTags, debugTags...)
}
@@ -205,9 +204,9 @@ func buildApple() {
"-libname=box",
"-tags-not-macos=with_low_memory",
}
//if !withTailscale {
// args = append(args, "-tags-macos="+strings.Join(memcTags, ","))
//}
if !withTailscale {
args = append(args, "-tags-macos="+strings.Join(memcTags, ","))
}
if !debugEnabled {
args = append(args, sharedFlags...)
@@ -216,9 +215,9 @@ func buildApple() {
}
tags := append(sharedTags, darwinTags...)
//if withTailscale {
// tags = append(tags, memcTags...)
//}
if withTailscale {
tags = append(tags, memcTags...)
}
if debugEnabled {
tags = append(tags, debugTags...)
}

View File

@@ -71,12 +71,12 @@ func findAndReplace(objectsMap map[string]any, projectContent string, bundleIDLi
indexEnd := indexStart + strings.Index(projectContent[indexStart:], "}")
versionStart := indexStart + strings.Index(projectContent[indexStart:indexEnd], "MARKETING_VERSION = ") + 20
versionEnd := versionStart + strings.Index(projectContent[versionStart:indexEnd], ";")
version := strings.Trim(projectContent[versionStart:versionEnd], "\"")
version := projectContent[versionStart:versionEnd]
if version == newVersion {
continue
}
updated = true
projectContent = projectContent[:versionStart] + "\"" + newVersion + "\"" + projectContent[versionEnd:]
projectContent = projectContent[:versionStart] + newVersion + projectContent[versionEnd:]
}
return projectContent, updated
}

54
common/conntrack/conn.go Normal file
View File

@@ -0,0 +1,54 @@
package conntrack
import (
"io"
"net"
"github.com/sagernet/sing/common/x/list"
)
// Conn wraps a net.Conn and registers it in the package-level list of open
// connections so it can be enumerated (List) and force-closed (Close) by the
// tracking machinery in track.go.
type Conn struct {
	net.Conn
	// element is this connection's entry in openConnection; its Value is
	// set to nil once the connection has been removed from the list.
	element *list.Element[io.Closer]
}
// NewConn registers conn in the open-connection list and returns a tracked
// wrapper. When the memory killer is enabled, the killer check may reject
// the connection: in that case conn is closed and the error is returned.
func NewConn(conn net.Conn) (net.Conn, error) {
	connAccess.Lock()
	entry := openConnection.PushBack(conn)
	connAccess.Unlock()
	if KillerEnabled {
		if err := KillerCheck(); err != nil {
			// Over the memory limit: the killer already flushed the
			// list; drop this connection as well.
			_ = conn.Close()
			return nil, err
		}
	}
	tracked := &Conn{Conn: conn, element: entry}
	return tracked, nil
}
// Close removes the connection from the open-connection list (if it is still
// registered) and then closes the underlying connection.
//
// NOTE(review): the outer c.element.Value != nil check runs without holding
// connAccess; the re-check under the lock keeps the list consistent, but the
// unlocked read is technically a data race under the Go memory model —
// confirm this double-checked fast path is intentional.
func (c *Conn) Close() error {
	if c.element.Value != nil {
		connAccess.Lock()
		if c.element.Value != nil {
			openConnection.Remove(c.element)
			c.element.Value = nil
		}
		connAccess.Unlock()
	}
	return c.Conn.Close()
}
// Upstream exposes the wrapped connection for sing's unwrapping machinery.
func (c *Conn) Upstream() any {
	return c.Conn
}
// ReaderReplaceable marks the wrapper as transparent for reads, allowing
// callers to bypass it and read from the upstream connection directly.
func (c *Conn) ReaderReplaceable() bool {
	return true
}

// WriterReplaceable marks the wrapper as transparent for writes.
func (c *Conn) WriterReplaceable() bool {
	return true
}

View File

@@ -0,0 +1,35 @@
package conntrack
import (
runtimeDebug "runtime/debug"
"time"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/memory"
)
var (
	// KillerEnabled turns on the out-of-memory check in KillerCheck.
	KillerEnabled bool
	// MemoryLimit is the memory-usage threshold (bytes) above which all
	// tracked connections are force-closed.
	MemoryLimit uint64
	// killerLastCheck rate-limits KillerCheck to one real check per 3s.
	killerLastCheck time.Time
)
// KillerCheck compares current memory usage against MemoryLimit. When the
// limit is exceeded it force-closes every tracked connection (Close),
// schedules a delayed FreeOSMemory to return freed pages to the OS, and
// reports an "out of memory" error. Checks are rate-limited to at most one
// every 3 seconds; rate-limited calls report success.
//
// NOTE(review): killerLastCheck is read and written without synchronization
// although KillerCheck can be invoked from concurrent dialers (via
// NewConn/NewPacketConn) — confirm whether this needs a mutex or atomic.
func KillerCheck() error {
	if !KillerEnabled {
		return nil
	}
	nowTime := time.Now()
	if nowTime.Sub(killerLastCheck) < 3*time.Second {
		return nil
	}
	killerLastCheck = nowTime
	if memory.Total() > MemoryLimit {
		Close()
		go func() {
			// Give connection teardown a moment before forcing the
			// runtime to release memory back to the OS.
			time.Sleep(time.Second)
			runtimeDebug.FreeOSMemory()
		}()
		return E.New("out of memory")
	}
	return nil
}

View File

@@ -0,0 +1,55 @@
package conntrack
import (
"io"
"net"
"github.com/sagernet/sing/common/bufio"
"github.com/sagernet/sing/common/x/list"
)
// PacketConn wraps a net.PacketConn and registers it in the package-level
// list of open connections so it can be enumerated and force-closed by the
// tracking machinery in track.go.
type PacketConn struct {
	net.PacketConn
	// element is this connection's entry in openConnection; its Value is
	// set to nil once the connection has been removed from the list.
	element *list.Element[io.Closer]
}
// NewPacketConn registers conn in the open-connection list and returns a
// tracked wrapper. When the memory killer is enabled, the killer check may
// reject the connection: in that case conn is closed and the error returned.
func NewPacketConn(conn net.PacketConn) (net.PacketConn, error) {
	connAccess.Lock()
	entry := openConnection.PushBack(conn)
	connAccess.Unlock()
	if KillerEnabled {
		if err := KillerCheck(); err != nil {
			// Over the memory limit: the killer already flushed the
			// list; drop this connection as well.
			_ = conn.Close()
			return nil, err
		}
	}
	tracked := &PacketConn{PacketConn: conn, element: entry}
	return tracked, nil
}
// Close removes the packet connection from the open-connection list (if it
// is still registered) and then closes the underlying connection.
//
// NOTE(review): the outer c.element.Value != nil check runs without holding
// connAccess; the re-check under the lock keeps the list consistent, but the
// unlocked read is technically a data race under the Go memory model —
// confirm this double-checked fast path is intentional.
func (c *PacketConn) Close() error {
	if c.element.Value != nil {
		connAccess.Lock()
		if c.element.Value != nil {
			openConnection.Remove(c.element)
			c.element.Value = nil
		}
		connAccess.Unlock()
	}
	return c.PacketConn.Close()
}
// Upstream exposes the wrapped connection, adapted through bufio, for
// sing's unwrapping machinery.
func (c *PacketConn) Upstream() any {
	return bufio.NewPacketConn(c.PacketConn)
}

// ReaderReplaceable marks the wrapper as transparent for reads.
func (c *PacketConn) ReaderReplaceable() bool {
	return true
}

// WriterReplaceable marks the wrapper as transparent for writes.
func (c *PacketConn) WriterReplaceable() bool {
	return true
}

47
common/conntrack/track.go Normal file
View File

@@ -0,0 +1,47 @@
package conntrack
import (
"io"
"sync"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/x/list"
)
var (
	// connAccess guards openConnection against concurrent mutation from
	// dialers (NewConn/NewPacketConn) and the killer (Close).
	connAccess sync.RWMutex
	// openConnection holds every currently-tracked open connection.
	openConnection list.List[io.Closer]
)
// Count returns the number of currently tracked open connections. It
// returns 0 when conntrack support is not compiled in.
func Count() int {
	if !Enabled {
		return 0
	}
	// Guard the list read: PushBack/Remove run concurrently from dialers,
	// and the unprotected Len() read was a data race.
	connAccess.RLock()
	defer connAccess.RUnlock()
	return openConnection.Len()
}
// List returns a snapshot of all currently tracked open connections, or nil
// when conntrack support is not compiled in.
func List() []io.Closer {
	if !Enabled {
		return nil
	}
	connAccess.RLock()
	defer connAccess.RUnlock()
	snapshot := make([]io.Closer, 0, openConnection.Len())
	for it := openConnection.Front(); it != nil; it = it.Next() {
		snapshot = append(snapshot, it.Value)
	}
	return snapshot
}
// Close force-closes every tracked connection and resets the list. It is a
// no-op when conntrack support is not compiled in.
func Close() {
	if !Enabled {
		return
	}
	connAccess.Lock()
	defer connAccess.Unlock()
	for it := openConnection.Front(); it != nil; it = it.Next() {
		// Best-effort close; clear the value so wrapper Close calls
		// skip the already-removed entry.
		common.Close(it.Value)
		it.Value = nil
	}
	openConnection.Init()
}

View File

@@ -0,0 +1,5 @@
//go:build !with_conntrack

package conntrack

// Enabled reports that connection tracking is compiled out; the tracking
// helpers in this package become no-ops.
const Enabled = false

View File

@@ -0,0 +1,5 @@
//go:build with_conntrack

package conntrack

// Enabled reports that connection tracking is compiled in; dialed
// connections are registered for enumeration and force-closing.
const Enabled = true

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/conntrack"
"github.com/sagernet/sing-box/common/listener"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/option"
@@ -36,7 +37,6 @@ type DefaultDialer struct {
udpAddr4 string
udpAddr6 string
netns string
connectionManager adapter.ConnectionManager
networkManager adapter.NetworkManager
networkStrategy *C.NetworkStrategy
defaultNetworkStrategy bool
@@ -47,7 +47,6 @@ type DefaultDialer struct {
}
func NewDefault(ctx context.Context, options option.DialerOptions) (*DefaultDialer, error) {
connectionManager := service.FromContext[adapter.ConnectionManager](ctx)
networkManager := service.FromContext[adapter.NetworkManager](ctx)
platformInterface := service.FromContext[adapter.PlatformInterface](ctx)
@@ -90,7 +89,7 @@ func NewDefault(ctx context.Context, options option.DialerOptions) (*DefaultDial
if networkManager != nil {
defaultOptions := networkManager.DefaultOptions()
if defaultOptions.BindInterface != "" && !disableDefaultBind {
if defaultOptions.BindInterface != "" {
bindFunc := control.BindToInterface(networkManager.InterfaceFinder(), defaultOptions.BindInterface, -1)
dialer.Control = control.Append(dialer.Control, bindFunc)
listener.Control = control.Append(listener.Control, bindFunc)
@@ -158,11 +157,8 @@ func NewDefault(ctx context.Context, options option.DialerOptions) (*DefaultDial
if keepInterval == 0 {
keepInterval = C.TCPKeepAliveInterval
}
dialer.KeepAliveConfig = net.KeepAliveConfig{
Enable: true,
Idle: keepIdle,
Interval: keepInterval,
}
dialer.KeepAlive = keepIdle
dialer.Control = control.Append(dialer.Control, control.SetKeepAlivePeriod(keepIdle, keepInterval))
}
var udpFragment bool
if options.UDPFragment != nil {
@@ -210,7 +206,6 @@ func NewDefault(ctx context.Context, options option.DialerOptions) (*DefaultDial
udpAddr4: udpAddr4,
udpAddr6: udpAddr6,
netns: options.NetNs,
connectionManager: connectionManager,
networkManager: networkManager,
networkStrategy: networkStrategy,
defaultNetworkStrategy: defaultNetworkStrategy,
@@ -243,7 +238,7 @@ func (d *DefaultDialer) DialContext(ctx context.Context, network string, address
return nil, E.New("domain not resolved")
}
if d.networkStrategy == nil {
return d.trackConn(listener.ListenNetworkNamespace[net.Conn](d.netns, func() (net.Conn, error) {
return trackConn(listener.ListenNetworkNamespace[net.Conn](d.netns, func() (net.Conn, error) {
switch N.NetworkName(network) {
case N.NetworkUDP:
if !address.IsIPv6() {
@@ -308,12 +303,12 @@ func (d *DefaultDialer) DialParallelInterface(ctx context.Context, network strin
if !fastFallback && !isPrimary {
d.networkLastFallback.Store(time.Now())
}
return d.trackConn(conn, nil)
return trackConn(conn, nil)
}
func (d *DefaultDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
if d.networkStrategy == nil {
return d.trackPacketConn(listener.ListenNetworkNamespace[net.PacketConn](d.netns, func() (net.PacketConn, error) {
return trackPacketConn(listener.ListenNetworkNamespace[net.PacketConn](d.netns, func() (net.PacketConn, error) {
if destination.IsIPv6() {
return d.udpListener.ListenPacket(ctx, N.NetworkUDP, d.udpAddr6)
} else if destination.IsIPv4() && !destination.Addr.IsUnspecified() {
@@ -365,23 +360,23 @@ func (d *DefaultDialer) ListenSerialInterfacePacket(ctx context.Context, destina
return nil, err
}
}
return d.trackPacketConn(packetConn, nil)
return trackPacketConn(packetConn, nil)
}
func (d *DefaultDialer) WireGuardControl() control.Func {
return d.udpListener.Control
}
func (d *DefaultDialer) trackConn(conn net.Conn, err error) (net.Conn, error) {
if d.connectionManager == nil || err != nil {
func trackConn(conn net.Conn, err error) (net.Conn, error) {
if !conntrack.Enabled || err != nil {
return conn, err
}
return d.connectionManager.TrackConn(conn), nil
return conntrack.NewConn(conn)
}
func (d *DefaultDialer) trackPacketConn(conn net.PacketConn, err error) (net.PacketConn, error) {
if d.connectionManager == nil || err != nil {
func trackPacketConn(conn net.PacketConn, err error) (net.PacketConn, error) {
if !conntrack.Enabled || err != nil {
return conn, err
}
return d.connectionManager.TrackPacketConn(conn), nil
return conntrack.NewPacketConn(conn)
}

View File

@@ -1,234 +0,0 @@
package geosite
import (
"bufio"
"bytes"
"encoding/binary"
"strings"
"testing"
"github.com/sagernet/sing/common/varbin"
"github.com/stretchr/testify/require"
)
// Old implementation using varbin reflection-based serialization
func oldWriteString(writer varbin.Writer, value string) error {
//nolint:staticcheck
return varbin.Write(writer, binary.BigEndian, value)
}
func oldWriteItem(writer varbin.Writer, item Item) error {
//nolint:staticcheck
return varbin.Write(writer, binary.BigEndian, item)
}
func oldReadString(reader varbin.Reader) (string, error) {
//nolint:staticcheck
return varbin.ReadValue[string](reader, binary.BigEndian)
}
func oldReadItem(reader varbin.Reader) (Item, error) {
//nolint:staticcheck
return varbin.ReadValue[Item](reader, binary.BigEndian)
}
func TestStringCompat(t *testing.T) {
t.Parallel()
cases := []struct {
name string
input string
}{
{"empty", ""},
{"single_char", "a"},
{"ascii", "example.com"},
{"utf8", "测试域名.中国"},
{"special_chars", "\x00\xff\n\t"},
{"127_bytes", strings.Repeat("x", 127)},
{"128_bytes", strings.Repeat("x", 128)},
{"16383_bytes", strings.Repeat("x", 16383)},
{"16384_bytes", strings.Repeat("x", 16384)},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
// Old write
var oldBuf bytes.Buffer
err := oldWriteString(&oldBuf, tc.input)
require.NoError(t, err)
// New write
var newBuf bytes.Buffer
err = writeString(&newBuf, tc.input)
require.NoError(t, err)
// Bytes must match
require.Equal(t, oldBuf.Bytes(), newBuf.Bytes(),
"mismatch for %q\nold: %x\nnew: %x", tc.name, oldBuf.Bytes(), newBuf.Bytes())
// New write -> old read
readBack, err := oldReadString(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
require.NoError(t, err)
require.Equal(t, tc.input, readBack)
// Old write -> new read
readBack2, err := readString(bufio.NewReader(bytes.NewReader(oldBuf.Bytes())))
require.NoError(t, err)
require.Equal(t, tc.input, readBack2)
})
}
}
func TestItemCompat(t *testing.T) {
t.Parallel()
// Note: varbin.Write has a bug where struct values (not pointers) don't write their fields
// because field.CanSet() returns false for non-addressable values.
// The old geosite code passed Item values to varbin.Write, which silently wrote nothing.
// The new code correctly writes Type + Value using manual serialization.
// This test verifies the new serialization format and round-trip correctness.
cases := []struct {
name string
input Item
}{
{"domain_empty", Item{Type: RuleTypeDomain, Value: ""}},
{"domain_normal", Item{Type: RuleTypeDomain, Value: "example.com"}},
{"domain_suffix", Item{Type: RuleTypeDomainSuffix, Value: ".example.com"}},
{"domain_keyword", Item{Type: RuleTypeDomainKeyword, Value: "google"}},
{"domain_regex", Item{Type: RuleTypeDomainRegex, Value: `^.*\.example\.com$`}},
{"utf8_domain", Item{Type: RuleTypeDomain, Value: "测试.com"}},
{"long_domain", Item{Type: RuleTypeDomainSuffix, Value: strings.Repeat("a", 200) + ".com"}},
{"128_bytes_value", Item{Type: RuleTypeDomain, Value: strings.Repeat("x", 128)}},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
// New write
var newBuf bytes.Buffer
err := newBuf.WriteByte(byte(tc.input.Type))
require.NoError(t, err)
err = writeString(&newBuf, tc.input.Value)
require.NoError(t, err)
// Verify format: Type (1 byte) + Value (uvarint len + bytes)
require.True(t, len(newBuf.Bytes()) >= 1, "output too short")
require.Equal(t, byte(tc.input.Type), newBuf.Bytes()[0], "type byte mismatch")
// New write -> old read (varbin can read correctly when given addressable target)
readBack, err := oldReadItem(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
require.NoError(t, err)
require.Equal(t, tc.input, readBack)
// New write -> new read
reader := bufio.NewReader(bytes.NewReader(newBuf.Bytes()))
typeByte, err := reader.ReadByte()
require.NoError(t, err)
value, err := readString(reader)
require.NoError(t, err)
require.Equal(t, tc.input, Item{Type: ItemType(typeByte), Value: value})
})
}
}
func TestGeositeWriteReadCompat(t *testing.T) {
t.Parallel()
cases := []struct {
name string
input map[string][]Item
}{
{
"empty_map",
map[string][]Item{},
},
{
"single_code_empty_items",
map[string][]Item{"test": {}},
},
{
"single_code_single_item",
map[string][]Item{"test": {{Type: RuleTypeDomain, Value: "a.com"}}},
},
{
"single_code_multi_items",
map[string][]Item{
"test": {
{Type: RuleTypeDomain, Value: "a.com"},
{Type: RuleTypeDomainSuffix, Value: ".b.com"},
{Type: RuleTypeDomainKeyword, Value: "keyword"},
{Type: RuleTypeDomainRegex, Value: `^.*$`},
},
},
},
{
"multi_code",
map[string][]Item{
"cn": {{Type: RuleTypeDomain, Value: "baidu.com"}, {Type: RuleTypeDomainSuffix, Value: ".cn"}},
"us": {{Type: RuleTypeDomain, Value: "google.com"}},
"jp": {{Type: RuleTypeDomainSuffix, Value: ".jp"}},
},
},
{
"utf8_values",
map[string][]Item{
"test": {
{Type: RuleTypeDomain, Value: "测试.中国"},
{Type: RuleTypeDomainSuffix, Value: ".テスト"},
},
},
},
{
"large_items",
generateLargeItems(1000),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
// Write using new implementation
var buf bytes.Buffer
err := Write(&buf, tc.input)
require.NoError(t, err)
// Read back and verify
reader, codes, err := NewReader(bytes.NewReader(buf.Bytes()))
require.NoError(t, err)
// Verify all codes exist
codeSet := make(map[string]bool)
for _, code := range codes {
codeSet[code] = true
}
for code := range tc.input {
require.True(t, codeSet[code], "missing code: %s", code)
}
// Verify items match
for code, expectedItems := range tc.input {
items, err := reader.Read(code)
require.NoError(t, err)
require.Equal(t, expectedItems, items, "items mismatch for code: %s", code)
}
})
}
}
func generateLargeItems(count int) map[string][]Item {
items := make([]Item, count)
for i := 0; i < count; i++ {
items[i] = Item{
Type: ItemType(i % 4),
Value: strings.Repeat("x", i%200) + ".com",
}
}
return map[string][]Item{"large": items}
}

View File

@@ -9,6 +9,7 @@ import (
"sync/atomic"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/varbin"
)
type Reader struct {
@@ -77,7 +78,7 @@ func (r *Reader) readMetadata() error {
codeIndex uint64
codeLength uint64
)
code, err = readString(reader)
code, err = varbin.ReadValue[string](reader, binary.BigEndian)
if err != nil {
return err
}
@@ -111,16 +112,9 @@ func (r *Reader) Read(code string) ([]Item, error) {
}
r.bufferedReader.Reset(r.reader)
itemList := make([]Item, r.domainLength[code])
for i := range itemList {
typeByte, err := r.bufferedReader.ReadByte()
if err != nil {
return nil, err
}
itemList[i].Type = ItemType(typeByte)
itemList[i].Value, err = readString(r.bufferedReader)
if err != nil {
return nil, err
}
err = varbin.Read(r.bufferedReader, binary.BigEndian, &itemList)
if err != nil {
return nil, err
}
return itemList, nil
}
@@ -141,18 +135,3 @@ func (r *readCounter) Read(p []byte) (n int, err error) {
}
return
}
func readString(reader io.ByteReader) (string, error) {
length, err := binary.ReadUvarint(reader)
if err != nil {
return "", err
}
bytes := make([]byte, length)
for i := range bytes {
bytes[i], err = reader.ReadByte()
if err != nil {
return "", err
}
}
return string(bytes), nil
}

View File

@@ -2,6 +2,7 @@ package geosite
import (
"bytes"
"encoding/binary"
"sort"
"github.com/sagernet/sing/common/varbin"
@@ -19,11 +20,7 @@ func Write(writer varbin.Writer, domains map[string][]Item) error {
for _, code := range keys {
index[code] = content.Len()
for _, item := range domains[code] {
err := content.WriteByte(byte(item.Type))
if err != nil {
return err
}
err = writeString(content, item.Value)
err := varbin.Write(content, binary.BigEndian, item)
if err != nil {
return err
}
@@ -41,7 +38,7 @@ func Write(writer varbin.Writer, domains map[string][]Item) error {
}
for _, code := range keys {
err = writeString(writer, code)
err = varbin.Write(writer, binary.BigEndian, code)
if err != nil {
return err
}
@@ -62,12 +59,3 @@ func Write(writer varbin.Writer, domains map[string][]Item) error {
return nil
}
func writeString(writer varbin.Writer, value string) error {
_, err := varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
_, err = writer.Write([]byte(value))
return err
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/binary"
"io"
"net/netip"
"unsafe"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/option"
@@ -506,24 +505,7 @@ func writeDefaultRule(writer varbin.Writer, rule option.DefaultHeadlessRule, gen
}
func readRuleItemString(reader varbin.Reader) ([]string, error) {
length, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
result := make([]string, length)
for i := range result {
strLen, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
buf := make([]byte, strLen)
_, err = io.ReadFull(reader, buf)
if err != nil {
return nil, err
}
result[i] = string(buf)
}
return result, nil
return varbin.ReadValue[[]string](reader, binary.BigEndian)
}
func writeRuleItemString(writer varbin.Writer, itemType uint8, value []string) error {
@@ -531,34 +513,11 @@ func writeRuleItemString(writer varbin.Writer, itemType uint8, value []string) e
if err != nil {
return err
}
_, err = varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
for _, s := range value {
_, err = varbin.WriteUvarint(writer, uint64(len(s)))
if err != nil {
return err
}
_, err = writer.Write([]byte(s))
if err != nil {
return err
}
}
return nil
return varbin.Write(writer, binary.BigEndian, value)
}
func readRuleItemUint8[E ~uint8](reader varbin.Reader) ([]E, error) {
length, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
result := make([]E, length)
_, err = io.ReadFull(reader, *(*[]byte)(unsafe.Pointer(&result)))
if err != nil {
return nil, err
}
return result, nil
return varbin.ReadValue[[]E](reader, binary.BigEndian)
}
func writeRuleItemUint8[E ~uint8](writer varbin.Writer, itemType uint8, value []E) error {
@@ -566,25 +525,11 @@ func writeRuleItemUint8[E ~uint8](writer varbin.Writer, itemType uint8, value []
if err != nil {
return err
}
_, err = varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
_, err = writer.Write(*(*[]byte)(unsafe.Pointer(&value)))
return err
return varbin.Write(writer, binary.BigEndian, value)
}
func readRuleItemUint16(reader varbin.Reader) ([]uint16, error) {
length, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
result := make([]uint16, length)
err = binary.Read(reader, binary.BigEndian, result)
if err != nil {
return nil, err
}
return result, nil
return varbin.ReadValue[[]uint16](reader, binary.BigEndian)
}
func writeRuleItemUint16(writer varbin.Writer, itemType uint8, value []uint16) error {
@@ -592,11 +537,7 @@ func writeRuleItemUint16(writer varbin.Writer, itemType uint8, value []uint16) e
if err != nil {
return err
}
_, err = varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
return binary.Write(writer, binary.BigEndian, value)
return varbin.Write(writer, binary.BigEndian, value)
}
func writeRuleItemCIDR(writer varbin.Writer, itemType uint8, value []string) error {

View File

@@ -1,494 +0,0 @@
package srs
import (
"bufio"
"bytes"
"encoding/binary"
"net/netip"
"strings"
"testing"
"unsafe"
M "github.com/sagernet/sing/common/metadata"
"github.com/sagernet/sing/common/varbin"
"github.com/stretchr/testify/require"
"go4.org/netipx"
)
// Old implementations using varbin reflection-based serialization
func oldWriteStringSlice(writer varbin.Writer, value []string) error {
//nolint:staticcheck
return varbin.Write(writer, binary.BigEndian, value)
}
func oldReadStringSlice(reader varbin.Reader) ([]string, error) {
//nolint:staticcheck
return varbin.ReadValue[[]string](reader, binary.BigEndian)
}
func oldWriteUint8Slice[E ~uint8](writer varbin.Writer, value []E) error {
//nolint:staticcheck
return varbin.Write(writer, binary.BigEndian, value)
}
func oldReadUint8Slice[E ~uint8](reader varbin.Reader) ([]E, error) {
//nolint:staticcheck
return varbin.ReadValue[[]E](reader, binary.BigEndian)
}
func oldWriteUint16Slice(writer varbin.Writer, value []uint16) error {
//nolint:staticcheck
return varbin.Write(writer, binary.BigEndian, value)
}
func oldReadUint16Slice(reader varbin.Reader) ([]uint16, error) {
//nolint:staticcheck
return varbin.ReadValue[[]uint16](reader, binary.BigEndian)
}
func oldWritePrefix(writer varbin.Writer, prefix netip.Prefix) error {
//nolint:staticcheck
err := varbin.Write(writer, binary.BigEndian, prefix.Addr().AsSlice())
if err != nil {
return err
}
return binary.Write(writer, binary.BigEndian, uint8(prefix.Bits()))
}
type oldIPRangeData struct {
From []byte
To []byte
}
// Note: The old writeIPSet had a bug where varbin.Write(writer, binary.BigEndian, data)
// with a struct VALUE (not pointer) silently wrote nothing because field.CanSet() returned false.
// This caused IP range data to be missing from the output.
// The new implementation correctly writes all range data.
//
// The old readIPSet used varbin.Read with a pre-allocated slice, which worked because
// slice elements are addressable and CanSet() returns true for them.
//
// For compatibility testing, we verify:
// 1. New write produces correct output with range data
// 2. New read can parse the new format correctly
// 3. Round-trip works correctly
func oldReadIPSet(reader varbin.Reader) (*netipx.IPSet, error) {
version, err := reader.ReadByte()
if err != nil {
return nil, err
}
if version != 1 {
return nil, err
}
var length uint64
err = binary.Read(reader, binary.BigEndian, &length)
if err != nil {
return nil, err
}
ranges := make([]oldIPRangeData, length)
//nolint:staticcheck
err = varbin.Read(reader, binary.BigEndian, &ranges)
if err != nil {
return nil, err
}
mySet := &myIPSet{
rr: make([]myIPRange, len(ranges)),
}
for i, rangeData := range ranges {
mySet.rr[i].from = M.AddrFromIP(rangeData.From)
mySet.rr[i].to = M.AddrFromIP(rangeData.To)
}
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
}
// New write functions (without itemType prefix for testing)
func newWriteStringSlice(writer varbin.Writer, value []string) error {
_, err := varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
for _, s := range value {
_, err = varbin.WriteUvarint(writer, uint64(len(s)))
if err != nil {
return err
}
_, err = writer.Write([]byte(s))
if err != nil {
return err
}
}
return nil
}
func newWriteUint8Slice[E ~uint8](writer varbin.Writer, value []E) error {
_, err := varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
_, err = writer.Write(*(*[]byte)(unsafe.Pointer(&value)))
return err
}
func newWriteUint16Slice(writer varbin.Writer, value []uint16) error {
_, err := varbin.WriteUvarint(writer, uint64(len(value)))
if err != nil {
return err
}
return binary.Write(writer, binary.BigEndian, value)
}
// newWritePrefix encodes prefix as a uvarint address length, the raw address
// bytes (4 for IPv4, 16 for IPv6), and a single prefix-bits byte.
func newWritePrefix(writer varbin.Writer, prefix netip.Prefix) error {
	address := prefix.Addr().AsSlice()
	if _, err := varbin.WriteUvarint(writer, uint64(len(address))); err != nil {
		return err
	}
	if _, err := writer.Write(address); err != nil {
		return err
	}
	return writer.WriteByte(uint8(prefix.Bits()))
}
// Tests
// TestStringSliceCompat verifies that the new string-slice encoder produces
// byte-for-byte identical output to the legacy encoder, and that each
// encoding decodes correctly through the other side's reader.
func TestStringSliceCompat(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name  string
		input []string
	}{
		{"nil", nil},
		{"empty", []string{}},
		{"single_empty", []string{""}},
		{"single", []string{"test"}},
		{"multi", []string{"a", "b", "c"}},
		{"with_empty", []string{"a", "", "c"}},
		{"utf8", []string{"测试", "テスト", "тест"}},
		{"long_string", []string{strings.Repeat("x", 128)}},
		// 127/128 straddle the one-byte uvarint length boundary.
		{"many_elements", generateStrings(128)},
		{"many_elements_256", generateStrings(256)},
		{"127_byte_string", []string{strings.Repeat("x", 127)}},
		{"128_byte_string", []string{strings.Repeat("x", 128)}},
		{"mixed_lengths", []string{"a", strings.Repeat("b", 100), "", strings.Repeat("c", 200)}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Old write
			var oldBuf bytes.Buffer
			err := oldWriteStringSlice(&oldBuf, tc.input)
			require.NoError(t, err)
			// New write
			var newBuf bytes.Buffer
			err = newWriteStringSlice(&newBuf, tc.input)
			require.NoError(t, err)
			// Bytes must match
			require.Equal(t, oldBuf.Bytes(), newBuf.Bytes(),
				"mismatch for %q\nold: %x\nnew: %x", tc.name, oldBuf.Bytes(), newBuf.Bytes())
			// New write -> old read
			readBack, err := oldReadStringSlice(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
			require.NoError(t, err)
			requireStringSliceEqual(t, tc.input, readBack)
			// Old write -> new read
			readBack2, err := readRuleItemString(bufio.NewReader(bytes.NewReader(oldBuf.Bytes())))
			require.NoError(t, err)
			requireStringSliceEqual(t, tc.input, readBack2)
		})
	}
}
// TestUint8SliceCompat verifies byte-level compatibility between the old and
// new uint8-slice encoders, plus cross round-trips through both readers.
func TestUint8SliceCompat(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name  string
		input []uint8
	}{
		{"nil", nil},
		{"empty", []uint8{}},
		{"single_zero", []uint8{0}},
		{"single_max", []uint8{255}},
		{"multi", []uint8{0, 1, 127, 128, 255}},
		{"boundary", []uint8{0x00, 0x7f, 0x80, 0xff}},
		{"sequential", generateUint8Slice(256)},
		// 127/128 straddle the one-byte uvarint length boundary.
		{"127_elements", generateUint8Slice(127)},
		{"128_elements", generateUint8Slice(128)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Old write
			var oldBuf bytes.Buffer
			err := oldWriteUint8Slice(&oldBuf, tc.input)
			require.NoError(t, err)
			// New write
			var newBuf bytes.Buffer
			err = newWriteUint8Slice(&newBuf, tc.input)
			require.NoError(t, err)
			// Bytes must match
			require.Equal(t, oldBuf.Bytes(), newBuf.Bytes(),
				"mismatch for %q\nold: %x\nnew: %x", tc.name, oldBuf.Bytes(), newBuf.Bytes())
			// New write -> old read
			readBack, err := oldReadUint8Slice[uint8](bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
			require.NoError(t, err)
			requireUint8SliceEqual(t, tc.input, readBack)
			// Old write -> new read
			readBack2, err := readRuleItemUint8[uint8](bufio.NewReader(bytes.NewReader(oldBuf.Bytes())))
			require.NoError(t, err)
			requireUint8SliceEqual(t, tc.input, readBack2)
		})
	}
}
// TestUint16SliceCompat verifies byte-level compatibility between the old and
// new uint16-slice encoders, plus cross round-trips through both readers.
func TestUint16SliceCompat(t *testing.T) {
	t.Parallel()
	cases := []struct {
		name  string
		input []uint16
	}{
		{"nil", nil},
		{"empty", []uint16{}},
		{"single_zero", []uint16{0}},
		{"single_max", []uint16{65535}},
		{"multi", []uint16{0, 255, 256, 32767, 32768, 65535}},
		{"ports", []uint16{80, 443, 8080, 8443}},
		// 127/128 straddle the one-byte uvarint length boundary.
		{"127_elements", generateUint16Slice(127)},
		{"128_elements", generateUint16Slice(128)},
		{"256_elements", generateUint16Slice(256)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Old write
			var oldBuf bytes.Buffer
			err := oldWriteUint16Slice(&oldBuf, tc.input)
			require.NoError(t, err)
			// New write
			var newBuf bytes.Buffer
			err = newWriteUint16Slice(&newBuf, tc.input)
			require.NoError(t, err)
			// Bytes must match
			require.Equal(t, oldBuf.Bytes(), newBuf.Bytes(),
				"mismatch for %q\nold: %x\nnew: %x", tc.name, oldBuf.Bytes(), newBuf.Bytes())
			// New write -> old read
			readBack, err := oldReadUint16Slice(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
			require.NoError(t, err)
			requireUint16SliceEqual(t, tc.input, readBack)
			// Old write -> new read
			readBack2, err := readRuleItemUint16(bufio.NewReader(bytes.NewReader(oldBuf.Bytes())))
			require.NoError(t, err)
			requireUint16SliceEqual(t, tc.input, readBack2)
		})
	}
}
func TestPrefixCompat(t *testing.T) {
t.Parallel()
cases := []struct {
name string
input netip.Prefix
}{
{"ipv4_0", netip.MustParsePrefix("0.0.0.0/0")},
{"ipv4_8", netip.MustParsePrefix("10.0.0.0/8")},
{"ipv4_16", netip.MustParsePrefix("192.168.0.0/16")},
{"ipv4_24", netip.MustParsePrefix("192.168.1.0/24")},
{"ipv4_32", netip.MustParsePrefix("1.2.3.4/32")},
{"ipv6_0", netip.MustParsePrefix("::/0")},
{"ipv6_64", netip.MustParsePrefix("2001:db8::/64")},
{"ipv6_128", netip.MustParsePrefix("::1/128")},
{"ipv6_full", netip.MustParsePrefix("2001:0db8:85a3:0000:0000:8a2e:0370:7334/128")},
{"ipv4_private", netip.MustParsePrefix("172.16.0.0/12")},
{"ipv6_link_local", netip.MustParsePrefix("fe80::/10")},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
// Old write
var oldBuf bytes.Buffer
err := oldWritePrefix(&oldBuf, tc.input)
require.NoError(t, err)
// New write
var newBuf bytes.Buffer
err = newWritePrefix(&newBuf, tc.input)
require.NoError(t, err)
// Bytes must match
require.Equal(t, oldBuf.Bytes(), newBuf.Bytes(),
"mismatch for %q\nold: %x\nnew: %x", tc.name, oldBuf.Bytes(), newBuf.Bytes())
// New write -> new read (no old read for prefix)
readBack, err := readPrefix(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
require.NoError(t, err)
require.Equal(t, tc.input, readBack)
// Old write -> new read
readBack2, err := readPrefix(bufio.NewReader(bytes.NewReader(oldBuf.Bytes())))
require.NoError(t, err)
require.Equal(t, tc.input, readBack2)
})
}
}
// TestIPSetCompat verifies the new IP-set encoder produces a well-formed
// stream (version byte + uint64 count) that decodes correctly through both
// the legacy reader and the new reader.
func TestIPSetCompat(t *testing.T) {
	t.Parallel()
	// Note: The old writeIPSet was buggy (varbin.Write with struct values wrote nothing).
	// This test verifies the new implementation writes correct data and round-trips correctly.
	cases := []struct {
		name  string
		input *netipx.IPSet
	}{
		{"single_ipv4", buildIPSet("1.2.3.4")},
		{"ipv4_range", buildIPSet("192.168.0.0/16")},
		{"multi_ipv4", buildIPSet("10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16")},
		{"single_ipv6", buildIPSet("::1")},
		{"ipv6_range", buildIPSet("2001:db8::/32")},
		{"mixed", buildIPSet("10.0.0.0/8", "::1", "2001:db8::/32")},
		{"large", buildLargeIPSet(100)},
		{"adjacent_ranges", buildIPSet("192.168.0.0/24", "192.168.1.0/24", "192.168.2.0/24")},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// New write
			var newBuf bytes.Buffer
			err := writeIPSet(&newBuf, tc.input)
			require.NoError(t, err)
			// Verify format starts with version byte (1) + uint64 count
			require.True(t, len(newBuf.Bytes()) >= 9, "output too short")
			require.Equal(t, byte(1), newBuf.Bytes()[0], "version byte mismatch")
			// New write -> old read (varbin.Read with pre-allocated slice works correctly)
			readBack, err := oldReadIPSet(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
			require.NoError(t, err)
			requireIPSetEqual(t, tc.input, readBack)
			// New write -> new read
			readBack2, err := readIPSet(bufio.NewReader(bytes.NewReader(newBuf.Bytes())))
			require.NoError(t, err)
			requireIPSetEqual(t, tc.input, readBack2)
		})
	}
}
// Helper functions
func generateStrings(count int) []string {
result := make([]string, count)
for i := range result {
result[i] = strings.Repeat("x", i%50)
}
return result
}
// generateUint8Slice returns count bytes cycling 0..255.
func generateUint8Slice(count int) []uint8 {
	result := make([]uint8, 0, count)
	for index := 0; index < count; index++ {
		result = append(result, uint8(index%256))
	}
	return result
}
// generateUint16Slice returns count values of index*257 truncated to uint16,
// spreading values across the full 16-bit range.
func generateUint16Slice(count int) []uint16 {
	result := make([]uint16, 0, count)
	for index := 0; index < count; index++ {
		result = append(result, uint16(index*257))
	}
	return result
}
// buildIPSet constructs an IPSet from a mix of CIDR prefixes and single
// addresses, panicking on any entry that parses as neither.
func buildIPSet(cidrs ...string) *netipx.IPSet {
	var builder netipx.IPSetBuilder
	for _, entry := range cidrs {
		if prefix, prefixErr := netip.ParsePrefix(entry); prefixErr == nil {
			builder.AddPrefix(prefix)
			continue
		}
		// Not a CIDR: fall back to a bare address.
		addr, addrErr := netip.ParseAddr(entry)
		if addrErr != nil {
			panic(addrErr)
		}
		builder.Add(addr)
	}
	set, _ := builder.IPSet()
	return set
}
// buildLargeIPSet constructs an IPSet of count disjoint /24 prefixes in
// 10.x.y.0/24 form, with x/y derived from the index.
func buildLargeIPSet(count int) *netipx.IPSet {
	var builder netipx.IPSetBuilder
	for index := 0; index < count; index++ {
		addr := netip.AddrFrom4([4]byte{10, byte(index / 256), byte(index % 256), 0})
		builder.AddPrefix(netip.PrefixFrom(addr, 24))
	}
	set, _ := builder.IPSet()
	return set
}
// requireStringSliceEqual asserts slice equality while treating nil and empty
// slices as interchangeable (the wire format cannot distinguish them).
func requireStringSliceEqual(t *testing.T, expected, actual []string) {
	t.Helper()
	if len(expected)+len(actual) == 0 {
		return
	}
	require.Equal(t, expected, actual)
}
// requireUint8SliceEqual asserts slice equality while treating nil and empty
// slices as interchangeable (the wire format cannot distinguish them).
func requireUint8SliceEqual(t *testing.T, expected, actual []uint8) {
	t.Helper()
	if len(expected)+len(actual) == 0 {
		return
	}
	require.Equal(t, expected, actual)
}
// requireUint16SliceEqual asserts slice equality while treating nil and empty
// slices as interchangeable (the wire format cannot distinguish them).
func requireUint16SliceEqual(t *testing.T, expected, actual []uint16) {
	t.Helper()
	if len(expected)+len(actual) == 0 {
		return
	}
	require.Equal(t, expected, actual)
}
// requireIPSetEqual asserts that two IP sets contain exactly the same ranges,
// comparing endpoint-by-endpoint so a failure names the offending range.
func requireIPSetEqual(t *testing.T, expected, actual *netipx.IPSet) {
	t.Helper()
	expectedRanges := expected.Ranges()
	actualRanges := actual.Ranges()
	require.Equal(t, len(expectedRanges), len(actualRanges), "range count mismatch")
	for index, expectedRange := range expectedRanges {
		require.Equal(t, expectedRange.From(), actualRanges[index].From(), "range[%d].from mismatch", index)
		require.Equal(t, expectedRange.To(), actualRanges[index].To(), "range[%d].to mismatch", index)
	}
}

View File

@@ -2,7 +2,6 @@ package srs
import (
"encoding/binary"
"io"
"net/netip"
M "github.com/sagernet/sing/common/metadata"
@@ -10,16 +9,11 @@ import (
)
func readPrefix(reader varbin.Reader) (netip.Prefix, error) {
addrLen, err := binary.ReadUvarint(reader)
addrSlice, err := varbin.ReadValue[[]byte](reader, binary.BigEndian)
if err != nil {
return netip.Prefix{}, err
}
addrSlice := make([]byte, addrLen)
_, err = io.ReadFull(reader, addrSlice)
if err != nil {
return netip.Prefix{}, err
}
prefixBits, err := reader.ReadByte()
prefixBits, err := varbin.ReadValue[uint8](reader, binary.BigEndian)
if err != nil {
return netip.Prefix{}, err
}
@@ -27,16 +21,11 @@ func readPrefix(reader varbin.Reader) (netip.Prefix, error) {
}
func writePrefix(writer varbin.Writer, prefix netip.Prefix) error {
addrSlice := prefix.Addr().AsSlice()
_, err := varbin.WriteUvarint(writer, uint64(len(addrSlice)))
err := varbin.Write(writer, binary.BigEndian, prefix.Addr().AsSlice())
if err != nil {
return err
}
_, err = writer.Write(addrSlice)
if err != nil {
return err
}
err = writer.WriteByte(uint8(prefix.Bits()))
err = binary.Write(writer, binary.BigEndian, uint8(prefix.Bits()))
if err != nil {
return err
}

View File

@@ -2,11 +2,11 @@ package srs
import (
"encoding/binary"
"io"
"net/netip"
"os"
"unsafe"
"github.com/sagernet/sing/common"
M "github.com/sagernet/sing/common/metadata"
"github.com/sagernet/sing/common/varbin"
@@ -22,6 +22,11 @@ type myIPRange struct {
to netip.Addr
}
type myIPRangeData struct {
From []byte
To []byte
}
func readIPSet(reader varbin.Reader) (*netipx.IPSet, error) {
version, err := reader.ReadByte()
if err != nil {
@@ -36,30 +41,17 @@ func readIPSet(reader varbin.Reader) (*netipx.IPSet, error) {
if err != nil {
return nil, err
}
mySet := &myIPSet{
rr: make([]myIPRange, length),
ranges := make([]myIPRangeData, length)
err = varbin.Read(reader, binary.BigEndian, &ranges)
if err != nil {
return nil, err
}
for i := range mySet.rr {
fromLen, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
fromBytes := make([]byte, fromLen)
_, err = io.ReadFull(reader, fromBytes)
if err != nil {
return nil, err
}
toLen, err := binary.ReadUvarint(reader)
if err != nil {
return nil, err
}
toBytes := make([]byte, toLen)
_, err = io.ReadFull(reader, toBytes)
if err != nil {
return nil, err
}
mySet.rr[i].from = M.AddrFromIP(fromBytes)
mySet.rr[i].to = M.AddrFromIP(toBytes)
mySet := &myIPSet{
rr: make([]myIPRange, len(ranges)),
}
for i, rangeData := range ranges {
mySet.rr[i].from = M.AddrFromIP(rangeData.From)
mySet.rr[i].to = M.AddrFromIP(rangeData.To)
}
return (*netipx.IPSet)(unsafe.Pointer(mySet)), nil
}
@@ -69,27 +61,18 @@ func writeIPSet(writer varbin.Writer, set *netipx.IPSet) error {
if err != nil {
return err
}
mySet := (*myIPSet)(unsafe.Pointer(set))
err = binary.Write(writer, binary.BigEndian, uint64(len(mySet.rr)))
dataList := common.Map((*myIPSet)(unsafe.Pointer(set)).rr, func(rr myIPRange) myIPRangeData {
return myIPRangeData{
From: rr.from.AsSlice(),
To: rr.to.AsSlice(),
}
})
err = binary.Write(writer, binary.BigEndian, uint64(len(dataList)))
if err != nil {
return err
}
for _, rr := range mySet.rr {
fromBytes := rr.from.AsSlice()
_, err = varbin.WriteUvarint(writer, uint64(len(fromBytes)))
if err != nil {
return err
}
_, err = writer.Write(fromBytes)
if err != nil {
return err
}
toBytes := rr.to.AsSlice()
_, err = varbin.WriteUvarint(writer, uint64(len(toBytes)))
if err != nil {
return err
}
_, err = writer.Write(toBytes)
for _, data := range dataList {
err = varbin.Write(writer, binary.BigEndian, data)
if err != nil {
return err
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/sagernet/sing/common/logger"
"github.com/caddyserver/certmagic"
"github.com/libdns/acmedns"
"github.com/libdns/alidns"
"github.com/libdns/cloudflare"
"github.com/mholt/acmez/v3/acme"
@@ -127,13 +126,6 @@ func startACME(ctx context.Context, logger logger.Logger, options option.Inbound
APIToken: dnsOptions.CloudflareOptions.APIToken,
ZoneToken: dnsOptions.CloudflareOptions.ZoneToken,
}
case C.DNSProviderACMEDNS:
solver.DNSProvider = &acmedns.Provider{
Username: dnsOptions.ACMEDNSOptions.Username,
Password: dnsOptions.ACMEDNSOptions.Password,
Subdomain: dnsOptions.ACMEDNSOptions.Subdomain,
ServerURL: dnsOptions.ACMEDNSOptions.ServerURL,
}
default:
return nil, nil, E.New("unsupported ACME DNS01 provider type: " + dnsOptions.Provider)
}

View File

@@ -33,5 +33,4 @@ const (
const (
DNSProviderAliDNS = "alidns"
DNSProviderCloudflare = "cloudflare"
DNSProviderACMEDNS = "acmedns"
)

View File

@@ -30,7 +30,6 @@ const (
TypeSSMAPI = "ssm-api"
TypeCCM = "ccm"
TypeOCM = "ocm"
TypeOOMKiller = "oom-killer"
)
const (
@@ -86,8 +85,6 @@ func ProxyDisplayName(proxyType string) string {
return "Hysteria2"
case TypeAnyTLS:
return "AnyTLS"
case TypeTailscale:
return "Tailscale"
case TypeSelector:
return "Selector"
case TypeURLTest:

View File

@@ -7,12 +7,9 @@ import (
"github.com/sagernet/sing-box"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/urltest"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/experimental/deprecated"
"github.com/sagernet/sing-box/include"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/service"
@@ -23,7 +20,6 @@ type Instance struct {
ctx context.Context
cancel context.CancelFunc
instance *box.Box
connectionManager adapter.ConnectionManager
clashServer adapter.ClashServer
cacheFile adapter.CacheFile
pauseManager pause.Manager
@@ -87,15 +83,6 @@ func (s *StartedService) newInstance(profileContent string, overrideOptions *Ove
}
}
}
if s.oomKiller && C.IsIos {
if !common.Any(options.Services, func(it option.Service) bool {
return it.Type == C.TypeOOMKiller
}) {
options.Services = append(options.Services, option.Service{
Type: C.TypeOOMKiller,
})
}
}
urlTestHistoryStorage := urltest.NewHistoryStorage()
ctx = service.ContextWithPtr(ctx, urlTestHistoryStorage)
i := &Instance{
@@ -113,11 +100,9 @@ func (s *StartedService) newInstance(profileContent string, overrideOptions *Ove
return nil, err
}
i.instance = boxInstance
i.connectionManager = service.FromContext[adapter.ConnectionManager](ctx)
i.clashServer = service.FromContext[adapter.ClashServer](ctx)
i.pauseManager = service.FromContext[pause.Manager](ctx)
i.cacheFile = service.FromContext[adapter.CacheFile](ctx)
log.SetStdLogger(boxInstance.LogFactory().Logger())
return i, nil
}

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/conntrack"
"github.com/sagernet/sing-box/common/urltest"
"github.com/sagernet/sing-box/experimental/clashapi"
"github.com/sagernet/sing-box/experimental/clashapi/trafficontrol"
@@ -35,7 +36,6 @@ type StartedService struct {
handler PlatformHandler
debug bool
logMaxLines int
oomKiller bool
// workingDirectory string
// tempDirectory string
// userID int
@@ -56,9 +56,6 @@ type StartedService struct {
urlTestHistoryStorage *urltest.HistoryStorage
clashModeSubscriber *observable.Subscriber[struct{}]
clashModeObserver *observable.Observer[struct{}]
connectionEventSubscriber *observable.Subscriber[trafficontrol.ConnectionEvent]
connectionEventObserver *observable.Observer[trafficontrol.ConnectionEvent]
}
type ServiceOptions struct {
@@ -67,7 +64,6 @@ type ServiceOptions struct {
Handler PlatformHandler
Debug bool
LogMaxLines int
OOMKiller bool
// WorkingDirectory string
// TempDirectory string
// UserID int
@@ -82,25 +78,22 @@ func NewStartedService(options ServiceOptions) *StartedService {
handler: options.Handler,
debug: options.Debug,
logMaxLines: options.LogMaxLines,
oomKiller: options.OOMKiller,
// workingDirectory: options.WorkingDirectory,
// tempDirectory: options.TempDirectory,
// userID: options.UserID,
// groupID: options.GroupID,
// systemProxyEnabled: options.SystemProxyEnabled,
serviceStatus: &ServiceStatus{Status: ServiceStatus_IDLE},
serviceStatusSubscriber: observable.NewSubscriber[*ServiceStatus](4),
logSubscriber: observable.NewSubscriber[*log.Entry](128),
urlTestSubscriber: observable.NewSubscriber[struct{}](1),
urlTestHistoryStorage: urltest.NewHistoryStorage(),
clashModeSubscriber: observable.NewSubscriber[struct{}](1),
connectionEventSubscriber: observable.NewSubscriber[trafficontrol.ConnectionEvent](256),
serviceStatus: &ServiceStatus{Status: ServiceStatus_IDLE},
serviceStatusSubscriber: observable.NewSubscriber[*ServiceStatus](4),
logSubscriber: observable.NewSubscriber[*log.Entry](128),
urlTestSubscriber: observable.NewSubscriber[struct{}](1),
urlTestHistoryStorage: urltest.NewHistoryStorage(),
clashModeSubscriber: observable.NewSubscriber[struct{}](1),
}
s.serviceStatusObserver = observable.NewObserver(s.serviceStatusSubscriber, 2)
s.logObserver = observable.NewObserver(s.logSubscriber, 64)
s.urlTestObserver = observable.NewObserver(s.urlTestSubscriber, 1)
s.clashModeObserver = observable.NewObserver(s.clashModeSubscriber, 1)
s.connectionEventObserver = observable.NewObserver(s.connectionEventSubscriber, 64)
return s
}
@@ -190,7 +183,6 @@ func (s *StartedService) StartOrReloadService(profileContent string, options *Ov
instance.urlTestHistoryStorage.SetHook(s.urlTestSubscriber)
if instance.clashServer != nil {
instance.clashServer.SetModeUpdateHook(s.clashModeSubscriber)
instance.clashServer.(*clashapi.Server).TrafficManager().SetEventHook(s.connectionEventSubscriber)
}
s.serviceAccess.Unlock()
err = instance.Start()
@@ -209,14 +201,6 @@ func (s *StartedService) StartOrReloadService(profileContent string, options *Ov
return nil
}
func (s *StartedService) Close() {
s.serviceStatusSubscriber.Close()
s.logSubscriber.Close()
s.urlTestSubscriber.Close()
s.clashModeSubscriber.Close()
s.connectionEventSubscriber.Close()
}
func (s *StartedService) CloseService() error {
s.serviceAccess.Lock()
switch s.serviceStatus.Status {
@@ -409,14 +393,12 @@ func (s *StartedService) SubscribeStatus(request *SubscribeStatusRequest, server
func (s *StartedService) readStatus() *Status {
var status Status
status.Memory = memory.Total()
status.Memory = memory.Inuse()
status.Goroutines = int32(runtime.NumGoroutine())
status.ConnectionsOut = int32(conntrack.Count())
s.serviceAccess.RLock()
nowService := s.instance
s.serviceAccess.RUnlock()
if nowService != nil && nowService.connectionManager != nil {
status.ConnectionsOut = int32(nowService.connectionManager.Count())
}
if nowService != nil {
if clashServer := nowService.clashServer; clashServer != nil {
status.TrafficAvailable = true
@@ -684,7 +666,7 @@ func (s *StartedService) SetSystemProxyEnabled(ctx context.Context, request *Set
return nil, err
}
func (s *StartedService) SubscribeConnections(request *SubscribeConnectionsRequest, server grpc.ServerStreamingServer[ConnectionEvents]) error {
func (s *StartedService) SubscribeConnections(request *SubscribeConnectionsRequest, server grpc.ServerStreamingServer[Connections]) error {
err := s.waitForStarted(server.Context())
if err != nil {
return err
@@ -692,260 +674,69 @@ func (s *StartedService) SubscribeConnections(request *SubscribeConnectionsReque
s.serviceAccess.RLock()
boxService := s.instance
s.serviceAccess.RUnlock()
if boxService.clashServer == nil {
return E.New("clash server not available")
}
trafficManager := boxService.clashServer.(*clashapi.Server).TrafficManager()
subscription, done, err := s.connectionEventObserver.Subscribe()
if err != nil {
return err
}
defer s.connectionEventObserver.UnSubscribe(subscription)
connectionSnapshots := make(map[uuid.UUID]connectionSnapshot)
initialEvents := s.buildInitialConnectionState(trafficManager, connectionSnapshots)
err = server.Send(&ConnectionEvents{
Events: initialEvents,
Reset_: true,
})
if err != nil {
return err
}
interval := time.Duration(request.Interval)
if interval <= 0 {
interval = time.Second
}
ticker := time.NewTicker(interval)
ticker := time.NewTicker(time.Duration(request.Interval))
defer ticker.Stop()
trafficManager := boxService.clashServer.(*clashapi.Server).TrafficManager()
var (
connections = make(map[uuid.UUID]*Connection)
outConnections []*Connection
)
for {
outConnections = outConnections[:0]
for _, connection := range trafficManager.Connections() {
outConnections = append(outConnections, newConnection(connections, connection, false))
}
for _, connection := range trafficManager.ClosedConnections() {
outConnections = append(outConnections, newConnection(connections, connection, true))
}
err := server.Send(&Connections{Connections: outConnections})
if err != nil {
return err
}
select {
case <-s.ctx.Done():
return s.ctx.Err()
case <-server.Context().Done():
return server.Context().Err()
case <-done:
return nil
case event := <-subscription:
var pendingEvents []*ConnectionEvent
if protoEvent := s.applyConnectionEvent(event, connectionSnapshots); protoEvent != nil {
pendingEvents = append(pendingEvents, protoEvent)
}
drain:
for {
select {
case event = <-subscription:
if protoEvent := s.applyConnectionEvent(event, connectionSnapshots); protoEvent != nil {
pendingEvents = append(pendingEvents, protoEvent)
}
default:
break drain
}
}
if len(pendingEvents) > 0 {
err = server.Send(&ConnectionEvents{Events: pendingEvents})
if err != nil {
return err
}
}
case <-ticker.C:
protoEvents := s.buildTrafficUpdates(trafficManager, connectionSnapshots)
if len(protoEvents) == 0 {
continue
}
err = server.Send(&ConnectionEvents{Events: protoEvents})
if err != nil {
return err
}
}
}
}
type connectionSnapshot struct {
uplink int64
downlink int64
hadTraffic bool
}
func (s *StartedService) buildInitialConnectionState(manager *trafficontrol.Manager, snapshots map[uuid.UUID]connectionSnapshot) []*ConnectionEvent {
var events []*ConnectionEvent
for _, metadata := range manager.Connections() {
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_NEW,
Id: metadata.ID.String(),
Connection: buildConnectionProto(metadata),
})
snapshots[metadata.ID] = connectionSnapshot{
uplink: metadata.Upload.Load(),
downlink: metadata.Download.Load(),
}
}
for _, metadata := range manager.ClosedConnections() {
conn := buildConnectionProto(metadata)
conn.ClosedAt = metadata.ClosedAt.UnixMilli()
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_NEW,
Id: metadata.ID.String(),
Connection: conn,
})
}
return events
}
func (s *StartedService) applyConnectionEvent(event trafficontrol.ConnectionEvent, snapshots map[uuid.UUID]connectionSnapshot) *ConnectionEvent {
switch event.Type {
case trafficontrol.ConnectionEventNew:
if _, exists := snapshots[event.ID]; exists {
return nil
}
snapshots[event.ID] = connectionSnapshot{
uplink: event.Metadata.Upload.Load(),
downlink: event.Metadata.Download.Load(),
}
return &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_NEW,
Id: event.ID.String(),
Connection: buildConnectionProto(event.Metadata),
}
case trafficontrol.ConnectionEventClosed:
delete(snapshots, event.ID)
protoEvent := &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_CLOSED,
Id: event.ID.String(),
}
closedAt := event.ClosedAt
if closedAt.IsZero() && !event.Metadata.ClosedAt.IsZero() {
closedAt = event.Metadata.ClosedAt
}
if closedAt.IsZero() {
closedAt = time.Now()
}
protoEvent.ClosedAt = closedAt.UnixMilli()
if event.Metadata.ID != uuid.Nil {
conn := buildConnectionProto(event.Metadata)
conn.ClosedAt = protoEvent.ClosedAt
protoEvent.Connection = conn
}
return protoEvent
default:
return nil
}
}
func (s *StartedService) buildTrafficUpdates(manager *trafficontrol.Manager, snapshots map[uuid.UUID]connectionSnapshot) []*ConnectionEvent {
activeConnections := manager.Connections()
activeIndex := make(map[uuid.UUID]*trafficontrol.TrackerMetadata, len(activeConnections))
var events []*ConnectionEvent
for _, metadata := range activeConnections {
activeIndex[metadata.ID] = metadata
currentUpload := metadata.Upload.Load()
currentDownload := metadata.Download.Load()
snapshot, exists := snapshots[metadata.ID]
if !exists {
snapshots[metadata.ID] = connectionSnapshot{
uplink: currentUpload,
downlink: currentDownload,
func newConnection(connections map[uuid.UUID]*Connection, metadata trafficontrol.TrackerMetadata, isClosed bool) *Connection {
if oldConnection, loaded := connections[metadata.ID]; loaded {
if isClosed {
if oldConnection.ClosedAt == 0 {
oldConnection.Uplink = 0
oldConnection.Downlink = 0
oldConnection.ClosedAt = metadata.ClosedAt.UnixMilli()
}
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_NEW,
Id: metadata.ID.String(),
Connection: buildConnectionProto(metadata),
})
continue
}
uplinkDelta := currentUpload - snapshot.uplink
downlinkDelta := currentDownload - snapshot.downlink
if uplinkDelta < 0 || downlinkDelta < 0 {
if snapshot.hadTraffic {
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_UPDATE,
Id: metadata.ID.String(),
UplinkDelta: 0,
DownlinkDelta: 0,
})
}
snapshot.uplink = currentUpload
snapshot.downlink = currentDownload
snapshot.hadTraffic = false
snapshots[metadata.ID] = snapshot
continue
}
if uplinkDelta > 0 || downlinkDelta > 0 {
snapshot.uplink = currentUpload
snapshot.downlink = currentDownload
snapshot.hadTraffic = true
snapshots[metadata.ID] = snapshot
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_UPDATE,
Id: metadata.ID.String(),
UplinkDelta: uplinkDelta,
DownlinkDelta: downlinkDelta,
})
continue
}
if snapshot.hadTraffic {
snapshot.uplink = currentUpload
snapshot.downlink = currentDownload
snapshot.hadTraffic = false
snapshots[metadata.ID] = snapshot
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_UPDATE,
Id: metadata.ID.String(),
UplinkDelta: 0,
DownlinkDelta: 0,
})
return oldConnection
}
lastUplink := oldConnection.UplinkTotal
lastDownlink := oldConnection.DownlinkTotal
uplinkTotal := metadata.Upload.Load()
downlinkTotal := metadata.Download.Load()
oldConnection.Uplink = uplinkTotal - lastUplink
oldConnection.Downlink = downlinkTotal - lastDownlink
oldConnection.UplinkTotal = uplinkTotal
oldConnection.DownlinkTotal = downlinkTotal
return oldConnection
}
var closedIndex map[uuid.UUID]*trafficontrol.TrackerMetadata
for id := range snapshots {
if _, exists := activeIndex[id]; exists {
continue
}
if closedIndex == nil {
closedIndex = make(map[uuid.UUID]*trafficontrol.TrackerMetadata)
for _, metadata := range manager.ClosedConnections() {
closedIndex[metadata.ID] = metadata
}
}
closedAt := time.Now()
var conn *Connection
if metadata, ok := closedIndex[id]; ok {
if !metadata.ClosedAt.IsZero() {
closedAt = metadata.ClosedAt
}
conn = buildConnectionProto(metadata)
conn.ClosedAt = closedAt.UnixMilli()
}
events = append(events, &ConnectionEvent{
Type: ConnectionEventType_CONNECTION_EVENT_CLOSED,
Id: id.String(),
ClosedAt: closedAt.UnixMilli(),
Connection: conn,
})
delete(snapshots, id)
}
return events
}
func buildConnectionProto(metadata *trafficontrol.TrackerMetadata) *Connection {
var rule string
if metadata.Rule != nil {
rule = metadata.Rule.String()
}
uplinkTotal := metadata.Upload.Load()
downlinkTotal := metadata.Download.Load()
uplink := uplinkTotal
downlink := downlinkTotal
var closedAt int64
if !metadata.ClosedAt.IsZero() {
closedAt = metadata.ClosedAt.UnixMilli()
uplink = 0
downlink = 0
}
var processInfo *ProcessInfo
if metadata.Metadata.ProcessInfo != nil {
processInfo = &ProcessInfo{
@@ -956,7 +747,7 @@ func buildConnectionProto(metadata *trafficontrol.TrackerMetadata) *Connection {
PackageName: metadata.Metadata.ProcessInfo.AndroidPackageName,
}
}
return &Connection{
connection := &Connection{
Id: metadata.ID.String(),
Inbound: metadata.Metadata.Inbound,
InboundType: metadata.Metadata.InboundType,
@@ -969,6 +760,9 @@ func buildConnectionProto(metadata *trafficontrol.TrackerMetadata) *Connection {
User: metadata.Metadata.User,
FromOutbound: metadata.Metadata.Outbound,
CreatedAt: metadata.CreatedAt.UnixMilli(),
ClosedAt: closedAt,
Uplink: uplink,
Downlink: downlink,
UplinkTotal: uplinkTotal,
DownlinkTotal: downlinkTotal,
Rule: rule,
@@ -977,6 +771,8 @@ func buildConnectionProto(metadata *trafficontrol.TrackerMetadata) *Connection {
ChainList: metadata.Chain,
ProcessInfo: processInfo,
}
connections[metadata.ID] = connection
return connection
}
func (s *StartedService) CloseConnection(ctx context.Context, request *CloseConnectionRequest) (*emptypb.Empty, error) {
@@ -997,12 +793,7 @@ func (s *StartedService) CloseConnection(ctx context.Context, request *CloseConn
}
func (s *StartedService) CloseAllConnections(ctx context.Context, empty *emptypb.Empty) (*emptypb.Empty, error) {
s.serviceAccess.RLock()
nowService := s.instance
s.serviceAccess.RUnlock()
if nowService != nil && nowService.connectionManager != nil {
nowService.connectionManager.CloseAll()
}
conntrack.Close()
return &emptypb.Empty{}, nil
}

View File

@@ -78,55 +78,104 @@ func (LogLevel) EnumDescriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{0}
}
type ConnectionEventType int32
type ConnectionFilter int32
const (
ConnectionEventType_CONNECTION_EVENT_NEW ConnectionEventType = 0
ConnectionEventType_CONNECTION_EVENT_UPDATE ConnectionEventType = 1
ConnectionEventType_CONNECTION_EVENT_CLOSED ConnectionEventType = 2
ConnectionFilter_ALL ConnectionFilter = 0
ConnectionFilter_ACTIVE ConnectionFilter = 1
ConnectionFilter_CLOSED ConnectionFilter = 2
)
// Enum value maps for ConnectionEventType.
// Enum value maps for ConnectionFilter.
var (
ConnectionEventType_name = map[int32]string{
0: "CONNECTION_EVENT_NEW",
1: "CONNECTION_EVENT_UPDATE",
2: "CONNECTION_EVENT_CLOSED",
ConnectionFilter_name = map[int32]string{
0: "ALL",
1: "ACTIVE",
2: "CLOSED",
}
ConnectionEventType_value = map[string]int32{
"CONNECTION_EVENT_NEW": 0,
"CONNECTION_EVENT_UPDATE": 1,
"CONNECTION_EVENT_CLOSED": 2,
ConnectionFilter_value = map[string]int32{
"ALL": 0,
"ACTIVE": 1,
"CLOSED": 2,
}
)
func (x ConnectionEventType) Enum() *ConnectionEventType {
p := new(ConnectionEventType)
func (x ConnectionFilter) Enum() *ConnectionFilter {
p := new(ConnectionFilter)
*p = x
return p
}
func (x ConnectionEventType) String() string {
func (x ConnectionFilter) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ConnectionEventType) Descriptor() protoreflect.EnumDescriptor {
func (ConnectionFilter) Descriptor() protoreflect.EnumDescriptor {
return file_daemon_started_service_proto_enumTypes[1].Descriptor()
}
func (ConnectionEventType) Type() protoreflect.EnumType {
func (ConnectionFilter) Type() protoreflect.EnumType {
return &file_daemon_started_service_proto_enumTypes[1]
}
func (x ConnectionEventType) Number() protoreflect.EnumNumber {
func (x ConnectionFilter) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ConnectionEventType.Descriptor instead.
func (ConnectionEventType) EnumDescriptor() ([]byte, []int) {
// Deprecated: Use ConnectionFilter.Descriptor instead.
func (ConnectionFilter) EnumDescriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{1}
}
type ConnectionSortBy int32
const (
ConnectionSortBy_DATE ConnectionSortBy = 0
ConnectionSortBy_TRAFFIC ConnectionSortBy = 1
ConnectionSortBy_TOTAL_TRAFFIC ConnectionSortBy = 2
)
// Enum value maps for ConnectionSortBy.
var (
ConnectionSortBy_name = map[int32]string{
0: "DATE",
1: "TRAFFIC",
2: "TOTAL_TRAFFIC",
}
ConnectionSortBy_value = map[string]int32{
"DATE": 0,
"TRAFFIC": 1,
"TOTAL_TRAFFIC": 2,
}
)
func (x ConnectionSortBy) Enum() *ConnectionSortBy {
p := new(ConnectionSortBy)
*p = x
return p
}
func (x ConnectionSortBy) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ConnectionSortBy) Descriptor() protoreflect.EnumDescriptor {
return file_daemon_started_service_proto_enumTypes[2].Descriptor()
}
func (ConnectionSortBy) Type() protoreflect.EnumType {
return &file_daemon_started_service_proto_enumTypes[2]
}
func (x ConnectionSortBy) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ConnectionSortBy.Descriptor instead.
func (ConnectionSortBy) EnumDescriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{2}
}
type ServiceStatus_Type int32
const (
@@ -166,11 +215,11 @@ func (x ServiceStatus_Type) String() string {
}
func (ServiceStatus_Type) Descriptor() protoreflect.EnumDescriptor {
return file_daemon_started_service_proto_enumTypes[2].Descriptor()
return file_daemon_started_service_proto_enumTypes[3].Descriptor()
}
func (ServiceStatus_Type) Type() protoreflect.EnumType {
return &file_daemon_started_service_proto_enumTypes[2]
return &file_daemon_started_service_proto_enumTypes[3]
}
func (x ServiceStatus_Type) Number() protoreflect.EnumNumber {
@@ -1065,6 +1114,8 @@ func (x *SetSystemProxyEnabledRequest) GetEnabled() bool {
type SubscribeConnectionsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Interval int64 `protobuf:"varint,1,opt,name=interval,proto3" json:"interval,omitempty"`
Filter ConnectionFilter `protobuf:"varint,2,opt,name=filter,proto3,enum=daemon.ConnectionFilter" json:"filter,omitempty"`
SortBy ConnectionSortBy `protobuf:"varint,3,opt,name=sortBy,proto3,enum=daemon.ConnectionSortBy" json:"sortBy,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1106,32 +1157,41 @@ func (x *SubscribeConnectionsRequest) GetInterval() int64 {
return 0
}
type ConnectionEvent struct {
func (x *SubscribeConnectionsRequest) GetFilter() ConnectionFilter {
if x != nil {
return x.Filter
}
return ConnectionFilter_ALL
}
func (x *SubscribeConnectionsRequest) GetSortBy() ConnectionSortBy {
if x != nil {
return x.SortBy
}
return ConnectionSortBy_DATE
}
type Connections struct {
state protoimpl.MessageState `protogen:"open.v1"`
Type ConnectionEventType `protobuf:"varint,1,opt,name=type,proto3,enum=daemon.ConnectionEventType" json:"type,omitempty"`
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Connection *Connection `protobuf:"bytes,3,opt,name=connection,proto3" json:"connection,omitempty"`
UplinkDelta int64 `protobuf:"varint,4,opt,name=uplinkDelta,proto3" json:"uplinkDelta,omitempty"`
DownlinkDelta int64 `protobuf:"varint,5,opt,name=downlinkDelta,proto3" json:"downlinkDelta,omitempty"`
ClosedAt int64 `protobuf:"varint,6,opt,name=closedAt,proto3" json:"closedAt,omitempty"`
Connections []*Connection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConnectionEvent) Reset() {
*x = ConnectionEvent{}
func (x *Connections) Reset() {
*x = Connections{}
mi := &file_daemon_started_service_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConnectionEvent) String() string {
func (x *Connections) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConnectionEvent) ProtoMessage() {}
func (*Connections) ProtoMessage() {}
func (x *ConnectionEvent) ProtoReflect() protoreflect.Message {
func (x *Connections) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[17]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -1143,105 +1203,18 @@ func (x *ConnectionEvent) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
// Deprecated: Use ConnectionEvent.ProtoReflect.Descriptor instead.
func (*ConnectionEvent) Descriptor() ([]byte, []int) {
// Deprecated: Use Connections.ProtoReflect.Descriptor instead.
func (*Connections) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{17}
}
func (x *ConnectionEvent) GetType() ConnectionEventType {
func (x *Connections) GetConnections() []*Connection {
if x != nil {
return x.Type
}
return ConnectionEventType_CONNECTION_EVENT_NEW
}
func (x *ConnectionEvent) GetId() string {
if x != nil {
return x.Id
}
return ""
}
func (x *ConnectionEvent) GetConnection() *Connection {
if x != nil {
return x.Connection
return x.Connections
}
return nil
}
func (x *ConnectionEvent) GetUplinkDelta() int64 {
if x != nil {
return x.UplinkDelta
}
return 0
}
func (x *ConnectionEvent) GetDownlinkDelta() int64 {
if x != nil {
return x.DownlinkDelta
}
return 0
}
func (x *ConnectionEvent) GetClosedAt() int64 {
if x != nil {
return x.ClosedAt
}
return 0
}
type ConnectionEvents struct {
state protoimpl.MessageState `protogen:"open.v1"`
Events []*ConnectionEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"`
Reset_ bool `protobuf:"varint,2,opt,name=reset,proto3" json:"reset,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ConnectionEvents) Reset() {
*x = ConnectionEvents{}
mi := &file_daemon_started_service_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ConnectionEvents) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ConnectionEvents) ProtoMessage() {}
func (x *ConnectionEvents) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ConnectionEvents.ProtoReflect.Descriptor instead.
func (*ConnectionEvents) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{18}
}
func (x *ConnectionEvents) GetEvents() []*ConnectionEvent {
if x != nil {
return x.Events
}
return nil
}
func (x *ConnectionEvents) GetReset_() bool {
if x != nil {
return x.Reset_
}
return false
}
type Connection struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -1272,7 +1245,7 @@ type Connection struct {
func (x *Connection) Reset() {
*x = Connection{}
mi := &file_daemon_started_service_proto_msgTypes[19]
mi := &file_daemon_started_service_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1284,7 +1257,7 @@ func (x *Connection) String() string {
func (*Connection) ProtoMessage() {}
func (x *Connection) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[19]
mi := &file_daemon_started_service_proto_msgTypes[18]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1297,7 +1270,7 @@ func (x *Connection) ProtoReflect() protoreflect.Message {
// Deprecated: Use Connection.ProtoReflect.Descriptor instead.
func (*Connection) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{19}
return file_daemon_started_service_proto_rawDescGZIP(), []int{18}
}
func (x *Connection) GetId() string {
@@ -1467,7 +1440,7 @@ type ProcessInfo struct {
func (x *ProcessInfo) Reset() {
*x = ProcessInfo{}
mi := &file_daemon_started_service_proto_msgTypes[20]
mi := &file_daemon_started_service_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1479,7 +1452,7 @@ func (x *ProcessInfo) String() string {
func (*ProcessInfo) ProtoMessage() {}
func (x *ProcessInfo) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[20]
mi := &file_daemon_started_service_proto_msgTypes[19]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1492,7 +1465,7 @@ func (x *ProcessInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead.
func (*ProcessInfo) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{20}
return file_daemon_started_service_proto_rawDescGZIP(), []int{19}
}
func (x *ProcessInfo) GetProcessId() uint32 {
@@ -1539,7 +1512,7 @@ type CloseConnectionRequest struct {
func (x *CloseConnectionRequest) Reset() {
*x = CloseConnectionRequest{}
mi := &file_daemon_started_service_proto_msgTypes[21]
mi := &file_daemon_started_service_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1551,7 +1524,7 @@ func (x *CloseConnectionRequest) String() string {
func (*CloseConnectionRequest) ProtoMessage() {}
func (x *CloseConnectionRequest) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[21]
mi := &file_daemon_started_service_proto_msgTypes[20]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1564,7 +1537,7 @@ func (x *CloseConnectionRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use CloseConnectionRequest.ProtoReflect.Descriptor instead.
func (*CloseConnectionRequest) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{21}
return file_daemon_started_service_proto_rawDescGZIP(), []int{20}
}
func (x *CloseConnectionRequest) GetId() string {
@@ -1583,7 +1556,7 @@ type DeprecatedWarnings struct {
func (x *DeprecatedWarnings) Reset() {
*x = DeprecatedWarnings{}
mi := &file_daemon_started_service_proto_msgTypes[22]
mi := &file_daemon_started_service_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1595,7 +1568,7 @@ func (x *DeprecatedWarnings) String() string {
func (*DeprecatedWarnings) ProtoMessage() {}
func (x *DeprecatedWarnings) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[22]
mi := &file_daemon_started_service_proto_msgTypes[21]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1608,7 +1581,7 @@ func (x *DeprecatedWarnings) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeprecatedWarnings.ProtoReflect.Descriptor instead.
func (*DeprecatedWarnings) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{22}
return file_daemon_started_service_proto_rawDescGZIP(), []int{21}
}
func (x *DeprecatedWarnings) GetWarnings() []*DeprecatedWarning {
@@ -1629,7 +1602,7 @@ type DeprecatedWarning struct {
func (x *DeprecatedWarning) Reset() {
*x = DeprecatedWarning{}
mi := &file_daemon_started_service_proto_msgTypes[23]
mi := &file_daemon_started_service_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1641,7 +1614,7 @@ func (x *DeprecatedWarning) String() string {
func (*DeprecatedWarning) ProtoMessage() {}
func (x *DeprecatedWarning) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[23]
mi := &file_daemon_started_service_proto_msgTypes[22]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1654,7 +1627,7 @@ func (x *DeprecatedWarning) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeprecatedWarning.ProtoReflect.Descriptor instead.
func (*DeprecatedWarning) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{23}
return file_daemon_started_service_proto_rawDescGZIP(), []int{22}
}
func (x *DeprecatedWarning) GetMessage() string {
@@ -1687,7 +1660,7 @@ type StartedAt struct {
func (x *StartedAt) Reset() {
*x = StartedAt{}
mi := &file_daemon_started_service_proto_msgTypes[24]
mi := &file_daemon_started_service_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1699,7 +1672,7 @@ func (x *StartedAt) String() string {
func (*StartedAt) ProtoMessage() {}
func (x *StartedAt) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[24]
mi := &file_daemon_started_service_proto_msgTypes[23]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1712,7 +1685,7 @@ func (x *StartedAt) ProtoReflect() protoreflect.Message {
// Deprecated: Use StartedAt.ProtoReflect.Descriptor instead.
func (*StartedAt) Descriptor() ([]byte, []int) {
return file_daemon_started_service_proto_rawDescGZIP(), []int{24}
return file_daemon_started_service_proto_rawDescGZIP(), []int{23}
}
func (x *StartedAt) GetStartedAt() int64 {
@@ -1732,7 +1705,7 @@ type Log_Message struct {
func (x *Log_Message) Reset() {
*x = Log_Message{}
mi := &file_daemon_started_service_proto_msgTypes[25]
mi := &file_daemon_started_service_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1744,7 +1717,7 @@ func (x *Log_Message) String() string {
func (*Log_Message) ProtoMessage() {}
func (x *Log_Message) ProtoReflect() protoreflect.Message {
mi := &file_daemon_started_service_proto_msgTypes[25]
mi := &file_daemon_started_service_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1845,21 +1818,13 @@ const file_daemon_started_service_proto_rawDesc = "" +
"\tavailable\x18\x01 \x01(\bR\tavailable\x12\x18\n" +
"\aenabled\x18\x02 \x01(\bR\aenabled\"8\n" +
"\x1cSetSystemProxyEnabledRequest\x12\x18\n" +
"\aenabled\x18\x01 \x01(\bR\aenabled\"9\n" +
"\aenabled\x18\x01 \x01(\bR\aenabled\"\x9d\x01\n" +
"\x1bSubscribeConnectionsRequest\x12\x1a\n" +
"\binterval\x18\x01 \x01(\x03R\binterval\"\xea\x01\n" +
"\x0fConnectionEvent\x12/\n" +
"\x04type\x18\x01 \x01(\x0e2\x1b.daemon.ConnectionEventTypeR\x04type\x12\x0e\n" +
"\x02id\x18\x02 \x01(\tR\x02id\x122\n" +
"\n" +
"connection\x18\x03 \x01(\v2\x12.daemon.ConnectionR\n" +
"connection\x12 \n" +
"\vuplinkDelta\x18\x04 \x01(\x03R\vuplinkDelta\x12$\n" +
"\rdownlinkDelta\x18\x05 \x01(\x03R\rdownlinkDelta\x12\x1a\n" +
"\bclosedAt\x18\x06 \x01(\x03R\bclosedAt\"Y\n" +
"\x10ConnectionEvents\x12/\n" +
"\x06events\x18\x01 \x03(\v2\x17.daemon.ConnectionEventR\x06events\x12\x14\n" +
"\x05reset\x18\x02 \x01(\bR\x05reset\"\x95\x05\n" +
"\binterval\x18\x01 \x01(\x03R\binterval\x120\n" +
"\x06filter\x18\x02 \x01(\x0e2\x18.daemon.ConnectionFilterR\x06filter\x120\n" +
"\x06sortBy\x18\x03 \x01(\x0e2\x18.daemon.ConnectionSortByR\x06sortBy\"C\n" +
"\vConnections\x124\n" +
"\vconnections\x18\x01 \x03(\v2\x12.daemon.ConnectionR\vconnections\"\x95\x05\n" +
"\n" +
"Connection\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" +
@@ -1908,11 +1873,17 @@ const file_daemon_started_service_proto_rawDesc = "" +
"\x04WARN\x10\x03\x12\b\n" +
"\x04INFO\x10\x04\x12\t\n" +
"\x05DEBUG\x10\x05\x12\t\n" +
"\x05TRACE\x10\x06*i\n" +
"\x13ConnectionEventType\x12\x18\n" +
"\x14CONNECTION_EVENT_NEW\x10\x00\x12\x1b\n" +
"\x17CONNECTION_EVENT_UPDATE\x10\x01\x12\x1b\n" +
"\x17CONNECTION_EVENT_CLOSED\x10\x022\xe5\v\n" +
"\x05TRACE\x10\x06*3\n" +
"\x10ConnectionFilter\x12\a\n" +
"\x03ALL\x10\x00\x12\n" +
"\n" +
"\x06ACTIVE\x10\x01\x12\n" +
"\n" +
"\x06CLOSED\x10\x02*<\n" +
"\x10ConnectionSortBy\x12\b\n" +
"\x04DATE\x10\x00\x12\v\n" +
"\aTRAFFIC\x10\x01\x12\x11\n" +
"\rTOTAL_TRAFFIC\x10\x022\xe0\v\n" +
"\x0eStartedService\x12=\n" +
"\vStopService\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12?\n" +
"\rReloadService\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\x12K\n" +
@@ -1929,8 +1900,8 @@ const file_daemon_started_service_proto_rawDesc = "" +
"\x0eSelectOutbound\x12\x1d.daemon.SelectOutboundRequest\x1a\x16.google.protobuf.Empty\"\x00\x12I\n" +
"\x0eSetGroupExpand\x12\x1d.daemon.SetGroupExpandRequest\x1a\x16.google.protobuf.Empty\"\x00\x12K\n" +
"\x14GetSystemProxyStatus\x12\x16.google.protobuf.Empty\x1a\x19.daemon.SystemProxyStatus\"\x00\x12W\n" +
"\x15SetSystemProxyEnabled\x12$.daemon.SetSystemProxyEnabledRequest\x1a\x16.google.protobuf.Empty\"\x00\x12Y\n" +
"\x14SubscribeConnections\x12#.daemon.SubscribeConnectionsRequest\x1a\x18.daemon.ConnectionEvents\"\x000\x01\x12K\n" +
"\x15SetSystemProxyEnabled\x12$.daemon.SetSystemProxyEnabledRequest\x1a\x16.google.protobuf.Empty\"\x00\x12T\n" +
"\x14SubscribeConnections\x12#.daemon.SubscribeConnectionsRequest\x1a\x13.daemon.Connections\"\x000\x01\x12K\n" +
"\x0fCloseConnection\x12\x1e.daemon.CloseConnectionRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n" +
"\x13CloseAllConnections\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12M\n" +
"\x15GetDeprecatedWarnings\x12\x16.google.protobuf.Empty\x1a\x1a.daemon.DeprecatedWarnings\"\x00\x12;\n" +
@@ -1949,31 +1920,31 @@ func file_daemon_started_service_proto_rawDescGZIP() []byte {
}
var (
file_daemon_started_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
file_daemon_started_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
file_daemon_started_service_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
file_daemon_started_service_proto_msgTypes = make([]protoimpl.MessageInfo, 25)
file_daemon_started_service_proto_goTypes = []any{
(LogLevel)(0), // 0: daemon.LogLevel
(ConnectionEventType)(0), // 1: daemon.ConnectionEventType
(ServiceStatus_Type)(0), // 2: daemon.ServiceStatus.Type
(*ServiceStatus)(nil), // 3: daemon.ServiceStatus
(*ReloadServiceRequest)(nil), // 4: daemon.ReloadServiceRequest
(*SubscribeStatusRequest)(nil), // 5: daemon.SubscribeStatusRequest
(*Log)(nil), // 6: daemon.Log
(*DefaultLogLevel)(nil), // 7: daemon.DefaultLogLevel
(*Status)(nil), // 8: daemon.Status
(*Groups)(nil), // 9: daemon.Groups
(*Group)(nil), // 10: daemon.Group
(*GroupItem)(nil), // 11: daemon.GroupItem
(*URLTestRequest)(nil), // 12: daemon.URLTestRequest
(*SelectOutboundRequest)(nil), // 13: daemon.SelectOutboundRequest
(*SetGroupExpandRequest)(nil), // 14: daemon.SetGroupExpandRequest
(*ClashMode)(nil), // 15: daemon.ClashMode
(*ClashModeStatus)(nil), // 16: daemon.ClashModeStatus
(*SystemProxyStatus)(nil), // 17: daemon.SystemProxyStatus
(*SetSystemProxyEnabledRequest)(nil), // 18: daemon.SetSystemProxyEnabledRequest
(*SubscribeConnectionsRequest)(nil), // 19: daemon.SubscribeConnectionsRequest
(*ConnectionEvent)(nil), // 20: daemon.ConnectionEvent
(*ConnectionEvents)(nil), // 21: daemon.ConnectionEvents
(ConnectionFilter)(0), // 1: daemon.ConnectionFilter
(ConnectionSortBy)(0), // 2: daemon.ConnectionSortBy
(ServiceStatus_Type)(0), // 3: daemon.ServiceStatus.Type
(*ServiceStatus)(nil), // 4: daemon.ServiceStatus
(*ReloadServiceRequest)(nil), // 5: daemon.ReloadServiceRequest
(*SubscribeStatusRequest)(nil), // 6: daemon.SubscribeStatusRequest
(*Log)(nil), // 7: daemon.Log
(*DefaultLogLevel)(nil), // 8: daemon.DefaultLogLevel
(*Status)(nil), // 9: daemon.Status
(*Groups)(nil), // 10: daemon.Groups
(*Group)(nil), // 11: daemon.Group
(*GroupItem)(nil), // 12: daemon.GroupItem
(*URLTestRequest)(nil), // 13: daemon.URLTestRequest
(*SelectOutboundRequest)(nil), // 14: daemon.SelectOutboundRequest
(*SetGroupExpandRequest)(nil), // 15: daemon.SetGroupExpandRequest
(*ClashMode)(nil), // 16: daemon.ClashMode
(*ClashModeStatus)(nil), // 17: daemon.ClashModeStatus
(*SystemProxyStatus)(nil), // 18: daemon.SystemProxyStatus
(*SetSystemProxyEnabledRequest)(nil), // 19: daemon.SetSystemProxyEnabledRequest
(*SubscribeConnectionsRequest)(nil), // 20: daemon.SubscribeConnectionsRequest
(*Connections)(nil), // 21: daemon.Connections
(*Connection)(nil), // 22: daemon.Connection
(*ProcessInfo)(nil), // 23: daemon.ProcessInfo
(*CloseConnectionRequest)(nil), // 24: daemon.CloseConnectionRequest
@@ -1986,14 +1957,14 @@ var (
)
var file_daemon_started_service_proto_depIdxs = []int32{
2, // 0: daemon.ServiceStatus.status:type_name -> daemon.ServiceStatus.Type
3, // 0: daemon.ServiceStatus.status:type_name -> daemon.ServiceStatus.Type
28, // 1: daemon.Log.messages:type_name -> daemon.Log.Message
0, // 2: daemon.DefaultLogLevel.level:type_name -> daemon.LogLevel
10, // 3: daemon.Groups.group:type_name -> daemon.Group
11, // 4: daemon.Group.items:type_name -> daemon.GroupItem
1, // 5: daemon.ConnectionEvent.type:type_name -> daemon.ConnectionEventType
22, // 6: daemon.ConnectionEvent.connection:type_name -> daemon.Connection
20, // 7: daemon.ConnectionEvents.events:type_name -> daemon.ConnectionEvent
11, // 3: daemon.Groups.group:type_name -> daemon.Group
12, // 4: daemon.Group.items:type_name -> daemon.GroupItem
1, // 5: daemon.SubscribeConnectionsRequest.filter:type_name -> daemon.ConnectionFilter
2, // 6: daemon.SubscribeConnectionsRequest.sortBy:type_name -> daemon.ConnectionSortBy
22, // 7: daemon.Connections.connections:type_name -> daemon.Connection
23, // 8: daemon.Connection.processInfo:type_name -> daemon.ProcessInfo
26, // 9: daemon.DeprecatedWarnings.warnings:type_name -> daemon.DeprecatedWarning
0, // 10: daemon.Log.Message.level:type_name -> daemon.LogLevel
@@ -2003,38 +1974,38 @@ var file_daemon_started_service_proto_depIdxs = []int32{
29, // 14: daemon.StartedService.SubscribeLog:input_type -> google.protobuf.Empty
29, // 15: daemon.StartedService.GetDefaultLogLevel:input_type -> google.protobuf.Empty
29, // 16: daemon.StartedService.ClearLogs:input_type -> google.protobuf.Empty
5, // 17: daemon.StartedService.SubscribeStatus:input_type -> daemon.SubscribeStatusRequest
6, // 17: daemon.StartedService.SubscribeStatus:input_type -> daemon.SubscribeStatusRequest
29, // 18: daemon.StartedService.SubscribeGroups:input_type -> google.protobuf.Empty
29, // 19: daemon.StartedService.GetClashModeStatus:input_type -> google.protobuf.Empty
29, // 20: daemon.StartedService.SubscribeClashMode:input_type -> google.protobuf.Empty
15, // 21: daemon.StartedService.SetClashMode:input_type -> daemon.ClashMode
12, // 22: daemon.StartedService.URLTest:input_type -> daemon.URLTestRequest
13, // 23: daemon.StartedService.SelectOutbound:input_type -> daemon.SelectOutboundRequest
14, // 24: daemon.StartedService.SetGroupExpand:input_type -> daemon.SetGroupExpandRequest
16, // 21: daemon.StartedService.SetClashMode:input_type -> daemon.ClashMode
13, // 22: daemon.StartedService.URLTest:input_type -> daemon.URLTestRequest
14, // 23: daemon.StartedService.SelectOutbound:input_type -> daemon.SelectOutboundRequest
15, // 24: daemon.StartedService.SetGroupExpand:input_type -> daemon.SetGroupExpandRequest
29, // 25: daemon.StartedService.GetSystemProxyStatus:input_type -> google.protobuf.Empty
18, // 26: daemon.StartedService.SetSystemProxyEnabled:input_type -> daemon.SetSystemProxyEnabledRequest
19, // 27: daemon.StartedService.SubscribeConnections:input_type -> daemon.SubscribeConnectionsRequest
19, // 26: daemon.StartedService.SetSystemProxyEnabled:input_type -> daemon.SetSystemProxyEnabledRequest
20, // 27: daemon.StartedService.SubscribeConnections:input_type -> daemon.SubscribeConnectionsRequest
24, // 28: daemon.StartedService.CloseConnection:input_type -> daemon.CloseConnectionRequest
29, // 29: daemon.StartedService.CloseAllConnections:input_type -> google.protobuf.Empty
29, // 30: daemon.StartedService.GetDeprecatedWarnings:input_type -> google.protobuf.Empty
29, // 31: daemon.StartedService.GetStartedAt:input_type -> google.protobuf.Empty
29, // 32: daemon.StartedService.StopService:output_type -> google.protobuf.Empty
29, // 33: daemon.StartedService.ReloadService:output_type -> google.protobuf.Empty
3, // 34: daemon.StartedService.SubscribeServiceStatus:output_type -> daemon.ServiceStatus
6, // 35: daemon.StartedService.SubscribeLog:output_type -> daemon.Log
7, // 36: daemon.StartedService.GetDefaultLogLevel:output_type -> daemon.DefaultLogLevel
4, // 34: daemon.StartedService.SubscribeServiceStatus:output_type -> daemon.ServiceStatus
7, // 35: daemon.StartedService.SubscribeLog:output_type -> daemon.Log
8, // 36: daemon.StartedService.GetDefaultLogLevel:output_type -> daemon.DefaultLogLevel
29, // 37: daemon.StartedService.ClearLogs:output_type -> google.protobuf.Empty
8, // 38: daemon.StartedService.SubscribeStatus:output_type -> daemon.Status
9, // 39: daemon.StartedService.SubscribeGroups:output_type -> daemon.Groups
16, // 40: daemon.StartedService.GetClashModeStatus:output_type -> daemon.ClashModeStatus
15, // 41: daemon.StartedService.SubscribeClashMode:output_type -> daemon.ClashMode
9, // 38: daemon.StartedService.SubscribeStatus:output_type -> daemon.Status
10, // 39: daemon.StartedService.SubscribeGroups:output_type -> daemon.Groups
17, // 40: daemon.StartedService.GetClashModeStatus:output_type -> daemon.ClashModeStatus
16, // 41: daemon.StartedService.SubscribeClashMode:output_type -> daemon.ClashMode
29, // 42: daemon.StartedService.SetClashMode:output_type -> google.protobuf.Empty
29, // 43: daemon.StartedService.URLTest:output_type -> google.protobuf.Empty
29, // 44: daemon.StartedService.SelectOutbound:output_type -> google.protobuf.Empty
29, // 45: daemon.StartedService.SetGroupExpand:output_type -> google.protobuf.Empty
17, // 46: daemon.StartedService.GetSystemProxyStatus:output_type -> daemon.SystemProxyStatus
18, // 46: daemon.StartedService.GetSystemProxyStatus:output_type -> daemon.SystemProxyStatus
29, // 47: daemon.StartedService.SetSystemProxyEnabled:output_type -> google.protobuf.Empty
21, // 48: daemon.StartedService.SubscribeConnections:output_type -> daemon.ConnectionEvents
21, // 48: daemon.StartedService.SubscribeConnections:output_type -> daemon.Connections
29, // 49: daemon.StartedService.CloseConnection:output_type -> google.protobuf.Empty
29, // 50: daemon.StartedService.CloseAllConnections:output_type -> google.protobuf.Empty
25, // 51: daemon.StartedService.GetDeprecatedWarnings:output_type -> daemon.DeprecatedWarnings
@@ -2056,8 +2027,8 @@ func file_daemon_started_service_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_daemon_started_service_proto_rawDesc), len(file_daemon_started_service_proto_rawDesc)),
NumEnums: 3,
NumMessages: 26,
NumEnums: 4,
NumMessages: 25,
NumExtensions: 0,
NumServices: 1,
},

View File

@@ -27,7 +27,7 @@ service StartedService {
rpc GetSystemProxyStatus(google.protobuf.Empty) returns(SystemProxyStatus) {}
rpc SetSystemProxyEnabled(SetSystemProxyEnabledRequest) returns(google.protobuf.Empty) {}
rpc SubscribeConnections(SubscribeConnectionsRequest) returns(stream ConnectionEvents) {}
rpc SubscribeConnections(SubscribeConnectionsRequest) returns(stream Connections) {}
rpc CloseConnection(CloseConnectionRequest) returns(google.protobuf.Empty) {}
rpc CloseAllConnections(google.protobuf.Empty) returns(google.protobuf.Empty) {}
rpc GetDeprecatedWarnings(google.protobuf.Empty) returns(DeprecatedWarnings) {}
@@ -143,26 +143,24 @@ message SetSystemProxyEnabledRequest {
message SubscribeConnectionsRequest {
int64 interval = 1;
ConnectionFilter filter = 2;
ConnectionSortBy sortBy = 3;
}
enum ConnectionEventType {
CONNECTION_EVENT_NEW = 0;
CONNECTION_EVENT_UPDATE = 1;
CONNECTION_EVENT_CLOSED = 2;
enum ConnectionFilter {
ALL = 0;
ACTIVE = 1;
CLOSED = 2;
}
message ConnectionEvent {
ConnectionEventType type = 1;
string id = 2;
Connection connection = 3;
int64 uplinkDelta = 4;
int64 downlinkDelta = 5;
int64 closedAt = 6;
enum ConnectionSortBy {
DATE = 0;
TRAFFIC = 1;
TOTAL_TRAFFIC = 2;
}
message ConnectionEvents {
repeated ConnectionEvent events = 1;
bool reset = 2;
message Connections {
repeated Connection connections = 1;
}
message Connection {

View File

@@ -58,7 +58,7 @@ type StartedServiceClient interface {
SetGroupExpand(ctx context.Context, in *SetGroupExpandRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
GetSystemProxyStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SystemProxyStatus, error)
SetSystemProxyEnabled(ctx context.Context, in *SetSystemProxyEnabledRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
SubscribeConnections(ctx context.Context, in *SubscribeConnectionsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ConnectionEvents], error)
SubscribeConnections(ctx context.Context, in *SubscribeConnectionsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Connections], error)
CloseConnection(ctx context.Context, in *CloseConnectionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
CloseAllConnections(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error)
GetDeprecatedWarnings(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DeprecatedWarnings, error)
@@ -278,13 +278,13 @@ func (c *startedServiceClient) SetSystemProxyEnabled(ctx context.Context, in *Se
return out, nil
}
func (c *startedServiceClient) SubscribeConnections(ctx context.Context, in *SubscribeConnectionsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ConnectionEvents], error) {
func (c *startedServiceClient) SubscribeConnections(ctx context.Context, in *SubscribeConnectionsRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[Connections], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &StartedService_ServiceDesc.Streams[5], StartedService_SubscribeConnections_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &grpc.GenericClientStream[SubscribeConnectionsRequest, ConnectionEvents]{ClientStream: stream}
x := &grpc.GenericClientStream[SubscribeConnectionsRequest, Connections]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -295,7 +295,7 @@ func (c *startedServiceClient) SubscribeConnections(ctx context.Context, in *Sub
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StartedService_SubscribeConnectionsClient = grpc.ServerStreamingClient[ConnectionEvents]
type StartedService_SubscribeConnectionsClient = grpc.ServerStreamingClient[Connections]
func (c *startedServiceClient) CloseConnection(ctx context.Context, in *CloseConnectionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
@@ -357,7 +357,7 @@ type StartedServiceServer interface {
SetGroupExpand(context.Context, *SetGroupExpandRequest) (*emptypb.Empty, error)
GetSystemProxyStatus(context.Context, *emptypb.Empty) (*SystemProxyStatus, error)
SetSystemProxyEnabled(context.Context, *SetSystemProxyEnabledRequest) (*emptypb.Empty, error)
SubscribeConnections(*SubscribeConnectionsRequest, grpc.ServerStreamingServer[ConnectionEvents]) error
SubscribeConnections(*SubscribeConnectionsRequest, grpc.ServerStreamingServer[Connections]) error
CloseConnection(context.Context, *CloseConnectionRequest) (*emptypb.Empty, error)
CloseAllConnections(context.Context, *emptypb.Empty) (*emptypb.Empty, error)
GetDeprecatedWarnings(context.Context, *emptypb.Empty) (*DeprecatedWarnings, error)
@@ -373,87 +373,87 @@ type StartedServiceServer interface {
type UnimplementedStartedServiceServer struct{}
func (UnimplementedStartedServiceServer) StopService(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method StopService not implemented")
return nil, status.Errorf(codes.Unimplemented, "method StopService not implemented")
}
func (UnimplementedStartedServiceServer) ReloadService(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method ReloadService not implemented")
return nil, status.Errorf(codes.Unimplemented, "method ReloadService not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeServiceStatus(*emptypb.Empty, grpc.ServerStreamingServer[ServiceStatus]) error {
return status.Error(codes.Unimplemented, "method SubscribeServiceStatus not implemented")
return status.Errorf(codes.Unimplemented, "method SubscribeServiceStatus not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeLog(*emptypb.Empty, grpc.ServerStreamingServer[Log]) error {
return status.Error(codes.Unimplemented, "method SubscribeLog not implemented")
return status.Errorf(codes.Unimplemented, "method SubscribeLog not implemented")
}
func (UnimplementedStartedServiceServer) GetDefaultLogLevel(context.Context, *emptypb.Empty) (*DefaultLogLevel, error) {
return nil, status.Error(codes.Unimplemented, "method GetDefaultLogLevel not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetDefaultLogLevel not implemented")
}
func (UnimplementedStartedServiceServer) ClearLogs(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method ClearLogs not implemented")
return nil, status.Errorf(codes.Unimplemented, "method ClearLogs not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeStatus(*SubscribeStatusRequest, grpc.ServerStreamingServer[Status]) error {
return status.Error(codes.Unimplemented, "method SubscribeStatus not implemented")
return status.Errorf(codes.Unimplemented, "method SubscribeStatus not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeGroups(*emptypb.Empty, grpc.ServerStreamingServer[Groups]) error {
return status.Error(codes.Unimplemented, "method SubscribeGroups not implemented")
return status.Errorf(codes.Unimplemented, "method SubscribeGroups not implemented")
}
func (UnimplementedStartedServiceServer) GetClashModeStatus(context.Context, *emptypb.Empty) (*ClashModeStatus, error) {
return nil, status.Error(codes.Unimplemented, "method GetClashModeStatus not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetClashModeStatus not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeClashMode(*emptypb.Empty, grpc.ServerStreamingServer[ClashMode]) error {
return status.Error(codes.Unimplemented, "method SubscribeClashMode not implemented")
return status.Errorf(codes.Unimplemented, "method SubscribeClashMode not implemented")
}
func (UnimplementedStartedServiceServer) SetClashMode(context.Context, *ClashMode) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method SetClashMode not implemented")
return nil, status.Errorf(codes.Unimplemented, "method SetClashMode not implemented")
}
func (UnimplementedStartedServiceServer) URLTest(context.Context, *URLTestRequest) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method URLTest not implemented")
return nil, status.Errorf(codes.Unimplemented, "method URLTest not implemented")
}
func (UnimplementedStartedServiceServer) SelectOutbound(context.Context, *SelectOutboundRequest) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method SelectOutbound not implemented")
return nil, status.Errorf(codes.Unimplemented, "method SelectOutbound not implemented")
}
func (UnimplementedStartedServiceServer) SetGroupExpand(context.Context, *SetGroupExpandRequest) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method SetGroupExpand not implemented")
return nil, status.Errorf(codes.Unimplemented, "method SetGroupExpand not implemented")
}
func (UnimplementedStartedServiceServer) GetSystemProxyStatus(context.Context, *emptypb.Empty) (*SystemProxyStatus, error) {
return nil, status.Error(codes.Unimplemented, "method GetSystemProxyStatus not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetSystemProxyStatus not implemented")
}
func (UnimplementedStartedServiceServer) SetSystemProxyEnabled(context.Context, *SetSystemProxyEnabledRequest) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method SetSystemProxyEnabled not implemented")
return nil, status.Errorf(codes.Unimplemented, "method SetSystemProxyEnabled not implemented")
}
func (UnimplementedStartedServiceServer) SubscribeConnections(*SubscribeConnectionsRequest, grpc.ServerStreamingServer[ConnectionEvents]) error {
return status.Error(codes.Unimplemented, "method SubscribeConnections not implemented")
func (UnimplementedStartedServiceServer) SubscribeConnections(*SubscribeConnectionsRequest, grpc.ServerStreamingServer[Connections]) error {
return status.Errorf(codes.Unimplemented, "method SubscribeConnections not implemented")
}
func (UnimplementedStartedServiceServer) CloseConnection(context.Context, *CloseConnectionRequest) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method CloseConnection not implemented")
return nil, status.Errorf(codes.Unimplemented, "method CloseConnection not implemented")
}
func (UnimplementedStartedServiceServer) CloseAllConnections(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Error(codes.Unimplemented, "method CloseAllConnections not implemented")
return nil, status.Errorf(codes.Unimplemented, "method CloseAllConnections not implemented")
}
func (UnimplementedStartedServiceServer) GetDeprecatedWarnings(context.Context, *emptypb.Empty) (*DeprecatedWarnings, error) {
return nil, status.Error(codes.Unimplemented, "method GetDeprecatedWarnings not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetDeprecatedWarnings not implemented")
}
func (UnimplementedStartedServiceServer) GetStartedAt(context.Context, *emptypb.Empty) (*StartedAt, error) {
return nil, status.Error(codes.Unimplemented, "method GetStartedAt not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetStartedAt not implemented")
}
func (UnimplementedStartedServiceServer) mustEmbedUnimplementedStartedServiceServer() {}
func (UnimplementedStartedServiceServer) testEmbeddedByValue() {}
@@ -466,7 +466,7 @@ type UnsafeStartedServiceServer interface {
}
func RegisterStartedServiceServer(s grpc.ServiceRegistrar, srv StartedServiceServer) {
// If the following call panics, it indicates UnimplementedStartedServiceServer was
// If the following call pancis, it indicates UnimplementedStartedServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
@@ -734,11 +734,11 @@ func _StartedService_SubscribeConnections_Handler(srv interface{}, stream grpc.S
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(StartedServiceServer).SubscribeConnections(m, &grpc.GenericServerStream[SubscribeConnectionsRequest, ConnectionEvents]{ServerStream: stream})
return srv.(StartedServiceServer).SubscribeConnections(m, &grpc.GenericServerStream[SubscribeConnectionsRequest, Connections]{ServerStream: stream})
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type StartedService_SubscribeConnectionsServer = grpc.ServerStreamingServer[ConnectionEvents]
type StartedService_SubscribeConnectionsServer = grpc.ServerStreamingServer[Connections]
func _StartedService_CloseConnection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CloseConnectionRequest)

View File

@@ -3,11 +3,11 @@ package box
import (
"runtime/debug"
"github.com/sagernet/sing-box/common/conntrack"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
)
func applyDebugOptions(options option.DebugOptions) error {
func applyDebugOptions(options option.DebugOptions) {
applyDebugListenOption(options)
if options.GCPercent != nil {
debug.SetGCPercent(*options.GCPercent)
@@ -26,9 +26,9 @@ func applyDebugOptions(options option.DebugOptions) error {
}
if options.MemoryLimit.Value() != 0 {
debug.SetMemoryLimit(int64(float64(options.MemoryLimit.Value()) / 1.5))
conntrack.MemoryLimit = options.MemoryLimit.Value()
}
if options.OOMKiller != nil {
return E.New("legacy oom_killer in debug options is removed, use oom-killer service instead")
conntrack.KillerEnabled = *options.OOMKiller
}
return nil
}

View File

@@ -144,11 +144,7 @@ func (c *Client) Exchange(ctx context.Context, transport adapter.DNSTransport, m
if c.cache != nil {
cond, loaded := c.cacheLock.LoadOrStore(question, make(chan struct{}))
if loaded {
select {
case <-cond:
case <-ctx.Done():
return nil, ctx.Err()
}
<-cond
} else {
defer func() {
c.cacheLock.Delete(question)
@@ -158,11 +154,7 @@ func (c *Client) Exchange(ctx context.Context, transport adapter.DNSTransport, m
} else if c.transportCache != nil {
cond, loaded := c.transportCacheLock.LoadOrStore(question, make(chan struct{}))
if loaded {
select {
case <-cond:
case <-ctx.Done():
return nil, ctx.Err()
}
<-cond
} else {
defer func() {
c.transportCacheLock.Delete(question)

View File

@@ -377,11 +377,9 @@ func (r *Router) Lookup(ctx context.Context, domain string, options adapter.DNSQ
case *R.RuleActionReject:
return nil, &R.RejectedError{Cause: action.Error(ctx)}
case *R.RuleActionPredefined:
responseAddrs = nil
if action.Rcode != mDNS.RcodeSuccess {
err = RcodeError(action.Rcode)
} else {
err = nil
for _, answer := range action.Answer {
switch record := answer.(type) {
case *mDNS.A:

View File

@@ -2,273 +2,6 @@
icon: material/alert-decagram
---
#### 1.13.0
Important changes since 1.12:
* Add NaiveProxy outbound **1**
* Add pre-match support for `auto_redirect` **2**
* Improve `auto_redirect` **3**
* Add Chrome Root Store certificate option **4**
* Add new options for ACME DNS-01 challenge providers **5**
* Add Wi-Fi state support for Linux and Windows **6**
* Add curve preferences, pinned public key SHA256, mTLS and ECH `query_server_name` for TLS options **7**
* Add kTLS support **8**
* Add ICMP echo (ping) proxy support **9**
* Add `interface_address`, `network_interface_address` and `default_interface_address` rule items **10**
* Add `preferred_by` route rule item **11**
* Improve `local` DNS server **12**
* Add `disable_tcp_keep_alive`, `tcp_keep_alive` and `tcp_keep_alive_interval` options for listen and dial fields **13**
* Add `bind_address_no_port` option for dial fields **14**
* Add system interface, relay server and advertise tags options for Tailscale endpoint **15**
* Add Claude Code Multiplexer service **16**
* Add OpenAI Codex Multiplexer service **17**
* Apple/Android: Refactor GUI
* Apple/Android: Add support for sharing configurations via [QRS](https://github.com/qifi-dev/qrs)
* Android: Add support for resisting VPN detection via Xposed
* Drop support for go1.23 **18**
* Drop support for Android 5.0 **19**
* Update uTLS to v1.8.2 **20**
* Update quic-go to v0.59.0
* Update gVisor to v20250811
* Update Tailscale to v1.92.4
**1**:
NaiveProxy outbound now supports QUIC, ECH, UDP over TCP, and configurable QUIC congestion control.
Only available on Apple platforms, Android, Windows and some Linux architectures.
Each Windows release includes `libcronet.dll`
ensure this file is in the same directory as `sing-box.exe` or in a directory listed in `PATH`.
See [NaiveProxy outbound](/configuration/outbound/naive/).
**2**:
`auto_redirect` now allows you to bypass sing-box for connections based on routing rules.
A new rule action `bypass` is introduced to support this feature. When matched during pre-match, the connection will bypass sing-box and connect directly.
This feature requires Linux with `auto_redirect` enabled.
See [Pre-match](/configuration/shared/pre-match/) and [Rule Action](/configuration/route/rule_action/#bypass).
**3**:
`auto_redirect` now rejects MPTCP connections by default to fix compatibility issues.
You can change it to bypass sing-box via the new `exclude_mptcp` option.
Adds a fallback iproute2 rule checked after system default rules (32766: main, 32767: default),
ensuring traffic is routed to the sing-box table when no route is found in system tables.
The rule index can be customized via `auto_redirect_iproute2_fallback_rule_index` (default: 32768).
See [TUN](/configuration/inbound/tun/#exclude_mptcp).
**4**:
Adds `chrome` as a new certificate store option alongside `mozilla`.
Both stores filter out China-based CA certificates.
See [Certificate](/configuration/certificate/#store).
**5**:
See [DNS-01 Challenge](/configuration/shared/dns01_challenge/).
**6**:
sing-box can now monitor Wi-Fi state on Linux and Windows to enable routing rules based on `wifi_ssid` and `wifi_bssid`.
See [Wi-Fi State](/configuration/shared/wifi-state/).
**7**:
See [TLS](/configuration/shared/tls/).
**8**:
Adds `kernel_tx` and `kernel_rx` options for TLS inbound.
Enables kernel-level TLS offloading via `splice(2)` on Linux 5.1+ with TLS 1.3.
See [TLS](/configuration/shared/tls/).
**9**:
sing-box can now proxy ICMP echo (ping) requests.
A new `icmp` network type is available for route rules.
Supported from TUN, WireGuard and Tailscale inbounds to Direct, WireGuard and Tailscale outbounds.
The `reject` action can also reply to ICMP echo requests.
**10**:
New rule items for matching based on interface IP addresses, available in route rules, DNS rules and rule-sets.
**11**:
Matches outbounds' preferred routes.
For Tailscale: MagicDNS domains and peers' allowed IPs. For WireGuard: peers' allowed IPs.
**12**:
The `local` DNS server now uses platform-native resolution:
`getaddrinfo`/libresolv on Apple platforms, systemd-resolved DBus on Linux.
A new `prefer_go` option is available to opt out.
See [Local DNS](/configuration/dns/server/local/).
**13**:
The default TCP keep-alive initial period has been updated from 10 minutes to 5 minutes.
See [Dial Fields](/configuration/shared/dial/#tcp_keep_alive).
**14**:
Adds the Linux socket option `IP_BIND_ADDRESS_NO_PORT` support when explicitly binding to a source address.
This allows reusing the same source port for multiple connections, improving scalability for high-concurrency proxy scenarios.
See [Dial Fields](/configuration/shared/dial/#bind_address_no_port).
**15**:
Tailscale endpoint can now create a system TUN interface to handle traffic directly.
New `relay_server_port` and `relay_server_static_endpoints` options for incoming relay connections.
New `advertise_tags` option for ACL tag advertisement.
See [Tailscale endpoint](/configuration/endpoint/tailscale/).
**16**:
CCM (Claude Code Multiplexer) service allows you to access your local Claude Code subscription remotely through custom tokens, eliminating the need for OAuth authentication on remote clients.
See [CCM](/configuration/service/ccm).
**17**:
See [OCM](/configuration/service/ocm).
**18**:
Due to maintenance difficulties, sing-box 1.13.0 requires at least Go 1.24 to compile.
**19**:
Due to maintenance difficulties, sing-box 1.13.0 will be the last version to support Android 5.0,
and only through a separate legacy build (with `-legacy-android-5` suffix).
For standalone binaries, the minimum Android version has been raised to Android 6.0,
since Termux requires Android 7.0 or later.
**20**:
This update fixes missing padding extension for Chrome 120+ fingerprints.
Also, documentation has been updated with a warning about uTLS fingerprinting vulnerabilities.
uTLS is not recommended for censorship circumvention due to fundamental architectural limitations;
use NaiveProxy instead for TLS fingerprint resistance.
#### 1.12.23
* Fixes and improvements
#### 1.13.0-rc.5
* Add `mipsle`, `mips64le`, `riscv64` and `loong64` support for NaiveProxy outbound
#### 1.12.22
* Fixes and improvements
#### 1.13.0-rc.3
* Fixes and improvements
#### 1.12.21
* Fixes and improvements
#### 1.13.0-rc.2
* Fixes and improvements
#### 1.12.20
* Fixes and improvements
#### 1.13.0-rc.1
* Fixes and improvements
#### 1.12.19
* Fixes and improvements
#### 1.13.0-beta.8
* Add fallback routing rule for `auto_redirect` **1**
* Fixes and improvements
**1**:
Adds a fallback iproute2 rule checked after system default rules (32766: main, 32767: default),
ensuring traffic is routed to the sing-box table when no route is found in system tables.
The rule index can be customized via `auto_redirect_iproute2_fallback_rule_index` (default: 32768).
#### 1.12.18
* Add fallback routing rule for `auto_redirect` **1**
* Fixes and improvements
**1**:
Adds a fallback iproute2 rule checked after system default rules (32766: main, 32767: default),
ensuring traffic is routed to the sing-box table when no route is found in system tables.
The rule index can be customized via `auto_redirect_iproute2_fallback_rule_index` (default: 32768).
#### 1.13.0-beta.6
* Update uTLS to v1.8.2 **1**
* Fixes and improvements
**1**:
This update fixes missing padding extension for Chrome 120+ fingerprints.
Also, documentation has been updated with a warning about uTLS fingerprinting vulnerabilities.
uTLS is not recommended for censorship circumvention due to fundamental architectural limitations;
use NaiveProxy instead for TLS fingerprint resistance.
#### 1.12.17
* Update uTLS to v1.8.2 **1**
* Fixes and improvements
**1**:
This update fixes missing padding extension for Chrome 120+ fingerprints.
Also, documentation has been updated with a warning about uTLS fingerprinting vulnerabilities.
uTLS is not recommended for censorship circumvention due to fundamental architectural limitations;
use NaiveProxy instead for TLS fingerprint resistance.
#### 1.13.0-beta.5
* Fixes and improvements
#### 1.12.16
* Fixes and improvements
#### 1.13.0-beta.4
* Apple/Android: Add support for sharing configurations via [QRS](https://github.com/qifi-dev/qrs)
* Android: Add support for resisting VPN detection via Xposed
* Update quic-go to v0.59.0
* Fixes and improvements
#### 1.13.0-beta.2
* Add `bind_address_no_port` option for dial fields **1**

View File

@@ -8,8 +8,7 @@ icon: material/new-box
:material-plus: [relay_server_static_endpoints](#relay_server_static_endpoints)
:material-plus: [system_interface](#system_interface)
:material-plus: [system_interface_name](#system_interface_name)
:material-plus: [system_interface_mtu](#system_interface_mtu)
:material-plus: [advertise_tags](#advertise_tags)
:material-plus: [system_interface_mtu](#system_interface_mtu)
!!! question "Since sing-box 1.12.0"
@@ -29,7 +28,6 @@ icon: material/new-box
"exit_node_allow_lan_access": false,
"advertise_routes": [],
"advertise_exit_node": false,
"advertise_tags": [],
"relay_server_port": 0,
"relay_server_static_endpoints": [],
"system_interface": false,
@@ -104,14 +102,6 @@ Example: `["192.168.1.1/24"]`
Indicates whether the node should advertise itself as an exit node.
#### advertise_tags
!!! question "Since sing-box 1.13.0"
Tags to advertise for this node, for ACL enforcement purposes.
Example: `["tag:server"]`
#### relay_server_port
!!! question "Since sing-box 1.13.0"

View File

@@ -8,8 +8,7 @@ icon: material/new-box
:material-plus: [relay_server_static_endpoints](#relay_server_static_endpoints)
:material-plus: [system_interface](#system_interface)
:material-plus: [system_interface_name](#system_interface_name)
:material-plus: [system_interface_mtu](#system_interface_mtu)
:material-plus: [advertise_tags](#advertise_tags)
:material-plus: [system_interface_mtu](#system_interface_mtu)
!!! question "自 sing-box 1.12.0 起"
@@ -29,7 +28,6 @@ icon: material/new-box
"exit_node_allow_lan_access": false,
"advertise_routes": [],
"advertise_exit_node": false,
"advertise_tags": [],
"relay_server_port": 0,
"relay_server_static_endpoints": [],
"system_interface": false,
@@ -103,14 +101,6 @@ icon: material/new-box
指示节点是否应将自己通告为出口节点。
#### advertise_tags
!!! question "自 sing-box 1.13.0 起"
为此节点通告的标签,用于 ACL 执行。
示例:`["tag:server"]`
#### relay_server_port
!!! question "自 sing-box 1.13.0 起"

View File

@@ -6,8 +6,7 @@ icon: material/new-box
:material-plus: [auto_redirect_reset_mark](#auto_redirect_reset_mark)
:material-plus: [auto_redirect_nfqueue](#auto_redirect_nfqueue)
:material-plus: [exclude_mptcp](#exclude_mptcp)
:material-plus: [auto_redirect_iproute2_fallback_rule_index](#auto_redirect_iproute2_fallback_rule_index)
:material-plus: [exclude_mptcp](#exclude_mptcp)
!!! quote "Changes in sing-box 1.12.0"
@@ -72,7 +71,6 @@ icon: material/new-box
"auto_redirect_output_mark": "0x2024",
"auto_redirect_reset_mark": "0x2025",
"auto_redirect_nfqueue": 100,
"auto_redirect_iproute2_fallback_rule_index": 32768,
"exclude_mptcp": false,
"loopback_address": [
"10.7.0.1"
@@ -305,17 +303,6 @@ NFQueue number used by `auto_redirect` pre-matching.
`100` is used by default.
#### auto_redirect_iproute2_fallback_rule_index
!!! question "Since sing-box 1.12.18"
Linux iproute2 fallback rule index generated by `auto_redirect`.
This rule is checked after system default rules (32766: main, 32767: default),
routing traffic to the sing-box table only when no route is found in system tables.
`32768` is used by default.
#### exclude_mptcp
!!! question "Since sing-box 1.13.0"

View File

@@ -6,8 +6,7 @@ icon: material/new-box
:material-plus: [auto_redirect_reset_mark](#auto_redirect_reset_mark)
:material-plus: [auto_redirect_nfqueue](#auto_redirect_nfqueue)
:material-plus: [exclude_mptcp](#exclude_mptcp)
:material-plus: [auto_redirect_iproute2_fallback_rule_index](#auto_redirect_iproute2_fallback_rule_index)
:material-plus: [exclude_mptcp](#exclude_mptcp)
!!! quote "sing-box 1.12.0 中的更改"
@@ -72,7 +71,6 @@ icon: material/new-box
"auto_redirect_output_mark": "0x2024",
"auto_redirect_reset_mark": "0x2025",
"auto_redirect_nfqueue": 100,
"auto_redirect_iproute2_fallback_rule_index": 32768,
"exclude_mptcp": false,
"loopback_address": [
"10.7.0.1"
@@ -304,24 +302,13 @@ tun 接口的 IPv6 前缀。
默认使用 `100`
#### auto_redirect_iproute2_fallback_rule_index
!!! question "自 sing-box 1.12.18 起"
`auto_redirect` 生成的 iproute2 回退规则索引。
此规则在系统默认规则32766: main32767: default之后检查
仅当系统路由表中未找到路由时才将流量路由到 sing-box 路由表。
默认使用 `32768`
#### exclude_mptcp
!!! question "自 sing-box 1.13.0 起"
!!! quote ""
仅支持 Linux且需要 nftables`auto_route``auto_redirect` 已启用。
仅支持 Linux且需要 nftables`auto_route``auto_redirect` 已启用。
由于协议限制MPTCP 无法被透明代理。

View File

@@ -5,8 +5,7 @@ icon: material/new-box
!!! quote "Changes in sing-box 1.13.0"
:material-plus: [alidns.security_token](#security_token)
:material-plus: [cloudflare.zone_token](#zone_token)
:material-plus: [acmedns](#acmedns)
:material-plus: [cloudflare.zone_token](#zone_token)
### Structure
@@ -55,19 +54,3 @@ The Security Token for STS temporary credentials.
Optional API token with `Zone:Read` permission.
When provided, allows `api_token` to be scoped to a single zone.
#### ACME-DNS
!!! question "Since sing-box 1.13.0"
```json
{
"provider": "acmedns",
"username": "",
"password": "",
"subdomain": "",
"server_url": ""
}
```
See [ACME-DNS](https://github.com/joohoi/acme-dns) for details.

View File

@@ -5,8 +5,7 @@ icon: material/new-box
!!! quote "sing-box 1.13.0 中的更改"
:material-plus: [alidns.security_token](#security_token)
:material-plus: [cloudflare.zone_token](#zone_token)
:material-plus: [acmedns](#acmedns)
:material-plus: [cloudflare.zone_token](#zone_token)
### 结构
@@ -55,19 +54,3 @@ icon: material/new-box
具有 `Zone:Read` 权限的可选 API 令牌。
提供后可将 `api_token` 限定到单个区域。
#### ACME-DNS
!!! question "自 sing-box 1.13.0 起"
```json
{
"provider": "acmedns",
"username": "",
"password": "",
"subdomain": "",
"server_url": ""
}
```
参阅 [ACME-DNS](https://github.com/joohoi/acme-dns)。

View File

@@ -418,18 +418,9 @@ Enable kernel TLS receive support.
==Client only==
!!! failure "Not Recommended"
uTLS has had repeated fingerprinting vulnerabilities discovered by researchers.
uTLS is a Go library that attempts to imitate browser TLS fingerprints by copying
ClientHello structure. However, browsers use completely different TLS stacks
(Chrome uses BoringSSL, Firefox uses NSS) with distinct implementation behaviors
that cannot be replicated by simply copying the handshake format, making detection possible.
Additionally, the library lacks active maintenance and has poor code quality,
making it unsuitable for censorship circumvention.
For TLS fingerprint resistance, use [NaiveProxy](/configuration/inbound/naive/) instead.
!!! failure ""
There is no evidence that GFW detects and blocks servers based on TLS client fingerprinting, and using an imperfect emulation that has not been security reviewed could pose security risks.
uTLS is a fork of "crypto/tls", which provides ClientHello fingerprinting resistance.

View File

@@ -417,16 +417,9 @@ echo | openssl s_client -servername example.com -connect example.com:443 2>/dev/
==仅客户端==
!!! failure "不推荐"
!!! failure ""
uTLS 已被研究人员多次发现其指纹可被识别的漏洞
uTLS 是一个试图通过复制 ClientHello 结构来模仿浏览器 TLS 指纹的 Go 库。
然而,浏览器使用完全不同的 TLS 实现Chrome 使用 BoringSSLFirefox 使用 NSS
其实现行为无法通过简单复制握手格式来复现,其行为细节必然存在差异,使得检测成为可能。
此外,此库缺乏积极维护,且代码质量较差,不建议用于反审查场景。
如需 TLS 指纹抵抗,请改用 [NaiveProxy](/configuration/inbound/naive/)。
没有证据表明 GFW 根据 TLS 客户端指纹检测并阻止服务器,并且,使用一个未经安全审查的不完美模拟可能带来安全隐患
uTLS 是 "crypto/tls" 的一个分支,它提供了 ClientHello 指纹识别阻力。

View File

@@ -4,7 +4,8 @@ icon: material/horse
# Trojan
Trojan is the most commonly used TLS proxy made in China. It can be used in various combinations.
Torjan is the most commonly used TLS proxy made in China. It can be used in various combinations,
but only the combination of uTLS and multiplexing is recommended.
| Protocol and implementation combination | Specification | Resists passive detection | Resists active probes |
|-----------------------------------------|----------------------------------------------------------------------|---------------------------|-----------------------|
@@ -139,7 +140,11 @@ Trojan is the most commonly used TLS proxy made in China. It can be used in vari
"password": "password",
"tls": {
"enabled": true,
"server_name": "example.org"
"server_name": "example.org",
"utls": {
"enabled": true,
"fingerprint": "firefox"
}
},
"multiplex": {
"enabled": true
@@ -166,7 +171,11 @@ Trojan is the most commonly used TLS proxy made in China. It can be used in vari
"tls": {
"enabled": true,
"server_name": "example.org",
"certificate_path": "/path/to/certificate.pem"
"certificate_path": "/path/to/certificate.pem",
"utls": {
"enabled": true,
"fingerprint": "firefox"
}
},
"multiplex": {
"enabled": true
@@ -189,7 +198,11 @@ Trojan is the most commonly used TLS proxy made in China. It can be used in vari
"tls": {
"enabled": true,
"server_name": "example.org",
"insecure": true
"insecure": true,
"utls": {
"enabled": true,
"fingerprint": "firefox"
}
},
"multiplex": {
"enabled": true

View File

@@ -45,7 +45,6 @@ type CacheFile struct {
storeRDRC bool
rdrcTimeout time.Duration
DB *bbolt.DB
resetAccess sync.Mutex
saveMetadataTimer *time.Timer
saveFakeIPAccess sync.RWMutex
saveDomain map[netip.Addr]string
@@ -170,55 +169,13 @@ func (c *CacheFile) Close() error {
return c.DB.Close()
}
func (c *CacheFile) view(fn func(tx *bbolt.Tx) error) (err error) {
defer func() {
if r := recover(); r != nil {
c.resetDB()
err = E.New("database corrupted: ", r)
}
}()
return c.DB.View(fn)
}
func (c *CacheFile) batch(fn func(tx *bbolt.Tx) error) (err error) {
defer func() {
if r := recover(); r != nil {
c.resetDB()
err = E.New("database corrupted: ", r)
}
}()
return c.DB.Batch(fn)
}
func (c *CacheFile) update(fn func(tx *bbolt.Tx) error) (err error) {
defer func() {
if r := recover(); r != nil {
c.resetDB()
err = E.New("database corrupted: ", r)
}
}()
return c.DB.Update(fn)
}
func (c *CacheFile) resetDB() {
c.resetAccess.Lock()
defer c.resetAccess.Unlock()
c.DB.Close()
os.Remove(c.path)
db, err := bbolt.Open(c.path, 0o666, &bbolt.Options{Timeout: time.Second})
if err == nil {
_ = filemanager.Chown(c.ctx, c.path)
c.DB = db
}
}
func (c *CacheFile) StoreFakeIP() bool {
return c.storeFakeIP
}
func (c *CacheFile) LoadMode() string {
var mode string
c.view(func(t *bbolt.Tx) error {
c.DB.View(func(t *bbolt.Tx) error {
bucket := t.Bucket(bucketMode)
if bucket == nil {
return nil
@@ -236,7 +193,7 @@ func (c *CacheFile) LoadMode() string {
}
func (c *CacheFile) StoreMode(mode string) error {
return c.batch(func(t *bbolt.Tx) error {
return c.DB.Batch(func(t *bbolt.Tx) error {
bucket, err := t.CreateBucketIfNotExists(bucketMode)
if err != nil {
return err
@@ -273,7 +230,7 @@ func (c *CacheFile) createBucket(t *bbolt.Tx, key []byte) (*bbolt.Bucket, error)
func (c *CacheFile) LoadSelected(group string) string {
var selected string
c.view(func(t *bbolt.Tx) error {
c.DB.View(func(t *bbolt.Tx) error {
bucket := c.bucket(t, bucketSelected)
if bucket == nil {
return nil
@@ -288,7 +245,7 @@ func (c *CacheFile) LoadSelected(group string) string {
}
func (c *CacheFile) StoreSelected(group, selected string) error {
return c.batch(func(t *bbolt.Tx) error {
return c.DB.Batch(func(t *bbolt.Tx) error {
bucket, err := c.createBucket(t, bucketSelected)
if err != nil {
return err
@@ -298,7 +255,7 @@ func (c *CacheFile) StoreSelected(group, selected string) error {
}
func (c *CacheFile) LoadGroupExpand(group string) (isExpand bool, loaded bool) {
c.view(func(t *bbolt.Tx) error {
c.DB.View(func(t *bbolt.Tx) error {
bucket := c.bucket(t, bucketExpand)
if bucket == nil {
return nil
@@ -314,7 +271,7 @@ func (c *CacheFile) LoadGroupExpand(group string) (isExpand bool, loaded bool) {
}
func (c *CacheFile) StoreGroupExpand(group string, isExpand bool) error {
return c.batch(func(t *bbolt.Tx) error {
return c.DB.Batch(func(t *bbolt.Tx) error {
bucket, err := c.createBucket(t, bucketExpand)
if err != nil {
return err
@@ -329,7 +286,7 @@ func (c *CacheFile) StoreGroupExpand(group string, isExpand bool) error {
func (c *CacheFile) LoadRuleSet(tag string) *adapter.SavedBinary {
var savedSet adapter.SavedBinary
err := c.view(func(t *bbolt.Tx) error {
err := c.DB.View(func(t *bbolt.Tx) error {
bucket := c.bucket(t, bucketRuleSet)
if bucket == nil {
return os.ErrNotExist
@@ -347,7 +304,7 @@ func (c *CacheFile) LoadRuleSet(tag string) *adapter.SavedBinary {
}
func (c *CacheFile) SaveRuleSet(tag string, set *adapter.SavedBinary) error {
return c.batch(func(t *bbolt.Tx) error {
return c.DB.Batch(func(t *bbolt.Tx) error {
bucket, err := c.createBucket(t, bucketRuleSet)
if err != nil {
return err

View File

@@ -23,7 +23,7 @@ var (
func (c *CacheFile) FakeIPMetadata() *adapter.FakeIPMetadata {
var metadata adapter.FakeIPMetadata
err := c.batch(func(tx *bbolt.Tx) error {
err := c.DB.Batch(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(bucketFakeIP)
if bucket == nil {
return os.ErrNotExist
@@ -45,7 +45,7 @@ func (c *CacheFile) FakeIPMetadata() *adapter.FakeIPMetadata {
}
func (c *CacheFile) FakeIPSaveMetadata(metadata *adapter.FakeIPMetadata) error {
return c.batch(func(tx *bbolt.Tx) error {
return c.DB.Batch(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketFakeIP)
if err != nil {
return err
@@ -69,7 +69,7 @@ func (c *CacheFile) FakeIPSaveMetadataAsync(metadata *adapter.FakeIPMetadata) {
}
func (c *CacheFile) FakeIPStore(address netip.Addr, domain string) error {
return c.batch(func(tx *bbolt.Tx) error {
return c.DB.Batch(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketFakeIP)
if err != nil {
return err
@@ -136,7 +136,7 @@ func (c *CacheFile) FakeIPLoad(address netip.Addr) (string, bool) {
return cachedDomain, true
}
var domain string
_ = c.view(func(tx *bbolt.Tx) error {
_ = c.DB.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(bucketFakeIP)
if bucket == nil {
return nil
@@ -163,7 +163,7 @@ func (c *CacheFile) FakeIPLoadDomain(domain string, isIPv6 bool) (netip.Addr, bo
return cachedAddress, true
}
var address netip.Addr
_ = c.view(func(tx *bbolt.Tx) error {
_ = c.DB.View(func(tx *bbolt.Tx) error {
var bucket *bbolt.Bucket
if isIPv6 {
bucket = tx.Bucket(bucketFakeIPDomain6)
@@ -180,7 +180,7 @@ func (c *CacheFile) FakeIPLoadDomain(domain string, isIPv6 bool) (netip.Addr, bo
}
func (c *CacheFile) FakeIPReset() error {
return c.batch(func(tx *bbolt.Tx) error {
return c.DB.Batch(func(tx *bbolt.Tx) error {
err := tx.DeleteBucket(bucketFakeIP)
if err != nil {
return err

View File

@@ -31,7 +31,7 @@ func (c *CacheFile) LoadRDRC(transportName string, qName string, qType uint16) (
copy(key[2:], qName)
defer buf.Put(key)
var deleteCache bool
err := c.view(func(tx *bbolt.Tx) error {
err := c.DB.View(func(tx *bbolt.Tx) error {
bucket := c.bucket(tx, bucketRDRC)
if bucket == nil {
return nil
@@ -56,7 +56,7 @@ func (c *CacheFile) LoadRDRC(transportName string, qName string, qType uint16) (
return
}
if deleteCache {
c.update(func(tx *bbolt.Tx) error {
c.DB.Update(func(tx *bbolt.Tx) error {
bucket := c.bucket(tx, bucketRDRC)
if bucket == nil {
return nil
@@ -72,7 +72,7 @@ func (c *CacheFile) LoadRDRC(transportName string, qName string, qType uint16) (
}
func (c *CacheFile) SaveRDRC(transportName string, qName string, qType uint16) error {
return c.batch(func(tx *bbolt.Tx) error {
return c.DB.Batch(func(tx *bbolt.Tx) error {
bucket, err := c.createBucket(tx, bucketRDRC)
if err != nil {
return err

View File

@@ -10,31 +10,11 @@ import (
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/json"
"github.com/sagernet/sing/common/observable"
"github.com/sagernet/sing/common/x/list"
"github.com/gofrs/uuid/v5"
)
type ConnectionEventType int
const (
ConnectionEventNew ConnectionEventType = iota
ConnectionEventUpdate
ConnectionEventClosed
)
type ConnectionEvent struct {
Type ConnectionEventType
ID uuid.UUID
Metadata *TrackerMetadata
UplinkDelta int64
DownlinkDelta int64
ClosedAt time.Time
}
const closedConnectionsLimit = 1000
type Manager struct {
uploadTotal atomic.Int64
downloadTotal atomic.Int64
@@ -42,52 +22,29 @@ type Manager struct {
connections compatible.Map[uuid.UUID, Tracker]
closedConnectionsAccess sync.Mutex
closedConnections list.List[TrackerMetadata]
memory uint64
eventSubscriber *observable.Subscriber[ConnectionEvent]
// process *process.Process
memory uint64
}
func NewManager() *Manager {
return &Manager{}
}
func (m *Manager) SetEventHook(subscriber *observable.Subscriber[ConnectionEvent]) {
m.eventSubscriber = subscriber
}
func (m *Manager) Join(c Tracker) {
metadata := c.Metadata()
m.connections.Store(metadata.ID, c)
if m.eventSubscriber != nil {
m.eventSubscriber.Emit(ConnectionEvent{
Type: ConnectionEventNew,
ID: metadata.ID,
Metadata: metadata,
})
}
m.connections.Store(c.Metadata().ID, c)
}
func (m *Manager) Leave(c Tracker) {
metadata := c.Metadata()
_, loaded := m.connections.LoadAndDelete(metadata.ID)
if loaded {
closedAt := time.Now()
metadata.ClosedAt = closedAt
metadataCopy := *metadata
metadata.ClosedAt = time.Now()
m.closedConnectionsAccess.Lock()
if m.closedConnections.Len() >= closedConnectionsLimit {
defer m.closedConnectionsAccess.Unlock()
if m.closedConnections.Len() >= 1000 {
m.closedConnections.PopFront()
}
m.closedConnections.PushBack(metadataCopy)
m.closedConnectionsAccess.Unlock()
if m.eventSubscriber != nil {
m.eventSubscriber.Emit(ConnectionEvent{
Type: ConnectionEventClosed,
ID: metadata.ID,
Metadata: &metadataCopy,
ClosedAt: closedAt,
})
}
m.closedConnections.PushBack(metadata)
}
}
@@ -107,8 +64,8 @@ func (m *Manager) ConnectionsLen() int {
return m.connections.Len()
}
func (m *Manager) Connections() []*TrackerMetadata {
var connections []*TrackerMetadata
func (m *Manager) Connections() []TrackerMetadata {
var connections []TrackerMetadata
m.connections.Range(func(_ uuid.UUID, value Tracker) bool {
connections = append(connections, value.Metadata())
return true
@@ -116,18 +73,10 @@ func (m *Manager) Connections() []*TrackerMetadata {
return connections
}
func (m *Manager) ClosedConnections() []*TrackerMetadata {
func (m *Manager) ClosedConnections() []TrackerMetadata {
m.closedConnectionsAccess.Lock()
values := m.closedConnections.Array()
m.closedConnectionsAccess.Unlock()
if len(values) == 0 {
return nil
}
connections := make([]*TrackerMetadata, len(values))
for i := range values {
connections[i] = &values[i]
}
return connections
defer m.closedConnectionsAccess.Unlock()
return m.closedConnections.Array()
}
func (m *Manager) Connection(id uuid.UUID) Tracker {
@@ -175,7 +124,7 @@ func (s *Snapshot) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
"downloadTotal": s.Download,
"uploadTotal": s.Upload,
"connections": common.Map(s.Connections, func(t Tracker) *TrackerMetadata { return t.Metadata() }),
"connections": common.Map(s.Connections, func(t Tracker) TrackerMetadata { return t.Metadata() }),
"memory": s.Memory,
})
}

View File

@@ -87,7 +87,7 @@ func (t TrackerMetadata) MarshalJSON() ([]byte, error) {
}
type Tracker interface {
Metadata() *TrackerMetadata
Metadata() TrackerMetadata
Close() error
}
@@ -97,8 +97,8 @@ type TCPConn struct {
manager *Manager
}
func (tt *TCPConn) Metadata() *TrackerMetadata {
return &tt.metadata
func (tt *TCPConn) Metadata() TrackerMetadata {
return tt.metadata
}
func (tt *TCPConn) Close() error {
@@ -178,8 +178,8 @@ type UDPConn struct {
manager *Manager
}
func (ut *UDPConn) Metadata() *TrackerMetadata {
return &ut.metadata
func (ut *UDPConn) Metadata() TrackerMetadata {
return ut.metadata
}
func (ut *UDPConn) Close() error {

View File

@@ -27,7 +27,6 @@ type CommandClient struct {
ctx context.Context
cancel context.CancelFunc
clientMutex sync.RWMutex
standalone bool
}
type CommandClientOptions struct {
@@ -49,7 +48,7 @@ type CommandClientHandler interface {
WriteGroups(message OutboundGroupIterator)
InitializeClashMode(modeList StringIterator, currentMode string)
UpdateClashMode(newMode string)
WriteConnectionEvents(events *ConnectionEvents)
WriteConnections(message *Connections)
}
type LogEntry struct {
@@ -74,7 +73,7 @@ func SetXPCDialer(dialer XPCDialer) {
}
func NewStandaloneCommandClient() *CommandClient {
return &CommandClient{standalone: true}
return new(CommandClient)
}
func NewCommandClient(handler CommandClientHandler, options *CommandClientOptions) *CommandClient {
@@ -98,139 +97,147 @@ func streamClientAuthInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc
return streamer(ctx, desc, cc, method, opts...)
}
const (
commandClientDialAttempts = 10
commandClientDialBaseDelay = 100 * time.Millisecond
commandClientDialStepDelay = 50 * time.Millisecond
)
func commandClientDialDelay(attempt int) time.Duration {
return commandClientDialBaseDelay + time.Duration(attempt)*commandClientDialStepDelay
}
func dialTarget() (string, func(context.Context, string) (net.Conn, error)) {
if sXPCDialer != nil {
return "passthrough:///xpc", func(ctx context.Context, _ string) (net.Conn, error) {
fileDescriptor, err := sXPCDialer.DialXPC()
if err != nil {
return nil, err
}
return networkConnectionFromFileDescriptor(fileDescriptor)
}
}
func (c *CommandClient) grpcDial() (*grpc.ClientConn, error) {
var target string
if sCommandServerListenPort == 0 {
socketPath := filepath.Join(sBasePath, "command.sock")
return "passthrough:///command-socket", func(ctx context.Context, _ string) (net.Conn, error) {
var networkDialer net.Dialer
return networkDialer.DialContext(ctx, "unix", socketPath)
}
target = "unix://" + filepath.Join(sBasePath, "command.sock")
} else {
target = net.JoinHostPort("127.0.0.1", strconv.Itoa(int(sCommandServerListenPort)))
}
return net.JoinHostPort("127.0.0.1", strconv.Itoa(int(sCommandServerListenPort))), nil
}
func networkConnectionFromFileDescriptor(fileDescriptor int32) (net.Conn, error) {
file := os.NewFile(uintptr(fileDescriptor), "xpc-command-socket")
if file == nil {
return nil, E.New("invalid file descriptor")
var (
conn *grpc.ClientConn
err error
)
clientOptions := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithUnaryInterceptor(unaryClientAuthInterceptor),
grpc.WithStreamInterceptor(streamClientAuthInterceptor),
}
networkConnection, err := net.FileConn(file)
if err != nil {
file.Close()
return nil, E.Cause(err, "create connection from fd")
}
file.Close()
return networkConnection, nil
}
func (c *CommandClient) dialWithRetry(target string, contextDialer func(context.Context, string) (net.Conn, error), retryDial bool) (*grpc.ClientConn, daemon.StartedServiceClient, error) {
var connection *grpc.ClientConn
var client daemon.StartedServiceClient
var lastError error
for attempt := 0; attempt < commandClientDialAttempts; attempt++ {
if connection == nil {
options := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithUnaryInterceptor(unaryClientAuthInterceptor),
grpc.WithStreamInterceptor(streamClientAuthInterceptor),
}
if contextDialer != nil {
options = append(options, grpc.WithContextDialer(contextDialer))
}
var err error
connection, err = grpc.NewClient(target, options...)
if err != nil {
lastError = err
if !retryDial {
return nil, nil, err
}
time.Sleep(commandClientDialDelay(attempt))
continue
}
client = daemon.NewStartedServiceClient(connection)
}
waitDuration := commandClientDialDelay(attempt)
ctx, cancel := context.WithTimeout(context.Background(), waitDuration)
_, err := client.GetStartedAt(ctx, &emptypb.Empty{}, grpc.WaitForReady(true))
cancel()
for i := 0; i < 10; i++ {
conn, err = grpc.NewClient(target, clientOptions...)
if err == nil {
return connection, client, nil
return conn, nil
}
lastError = err
time.Sleep(time.Duration(100+i*50) * time.Millisecond)
}
if connection != nil {
connection.Close()
}
return nil, nil, lastError
return nil, err
}
func (c *CommandClient) Connect() error {
c.clientMutex.Lock()
common.Close(common.PtrOrNil(c.grpcConn))
target, contextDialer := dialTarget()
connection, client, err := c.dialWithRetry(target, contextDialer, true)
if err != nil {
if sXPCDialer != nil {
fd, err := sXPCDialer.DialXPC()
if err != nil {
c.clientMutex.Unlock()
return err
}
file := os.NewFile(uintptr(fd), "xpc-command-socket")
if file == nil {
c.clientMutex.Unlock()
return E.New("invalid file descriptor")
}
netConn, err := net.FileConn(file)
if err != nil {
file.Close()
c.clientMutex.Unlock()
return E.Cause(err, "create connection from fd")
}
file.Close()
clientOptions := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
return netConn, nil
}),
grpc.WithUnaryInterceptor(unaryClientAuthInterceptor),
grpc.WithStreamInterceptor(streamClientAuthInterceptor),
}
grpcConn, err := grpc.NewClient("passthrough:///xpc", clientOptions...)
if err != nil {
netConn.Close()
c.clientMutex.Unlock()
return err
}
c.grpcConn = grpcConn
c.grpcClient = daemon.NewStartedServiceClient(grpcConn)
c.ctx, c.cancel = context.WithCancel(context.Background())
c.clientMutex.Unlock()
} else {
conn, err := c.grpcDial()
if err != nil {
c.clientMutex.Unlock()
return err
}
c.grpcConn = conn
c.grpcClient = daemon.NewStartedServiceClient(conn)
c.ctx, c.cancel = context.WithCancel(context.Background())
c.clientMutex.Unlock()
return err
}
c.grpcConn = connection
c.grpcClient = client
c.ctx, c.cancel = context.WithCancel(context.Background())
c.clientMutex.Unlock()
c.handler.Connected()
return c.dispatchCommands()
for _, command := range c.options.commands {
switch command {
case CommandLog:
go c.handleLogStream()
case CommandStatus:
go c.handleStatusStream()
case CommandGroup:
go c.handleGroupStream()
case CommandClashMode:
go c.handleClashModeStream()
case CommandConnections:
go c.handleConnectionsStream()
default:
return E.New("unknown command: ", command)
}
}
return nil
}
func (c *CommandClient) ConnectWithFD(fd int32) error {
c.clientMutex.Lock()
common.Close(common.PtrOrNil(c.grpcConn))
networkConnection, err := networkConnectionFromFileDescriptor(fd)
file := os.NewFile(uintptr(fd), "xpc-command-socket")
if file == nil {
c.clientMutex.Unlock()
return E.New("invalid file descriptor")
}
netConn, err := net.FileConn(file)
if err != nil {
file.Close()
c.clientMutex.Unlock()
return E.Cause(err, "create connection from fd")
}
file.Close()
clientOptions := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
return netConn, nil
}),
grpc.WithUnaryInterceptor(unaryClientAuthInterceptor),
grpc.WithStreamInterceptor(streamClientAuthInterceptor),
}
grpcConn, err := grpc.NewClient("passthrough:///xpc", clientOptions...)
if err != nil {
netConn.Close()
c.clientMutex.Unlock()
return err
}
connection, client, err := c.dialWithRetry("passthrough:///xpc", func(ctx context.Context, _ string) (net.Conn, error) {
return networkConnection, nil
}, false)
if err != nil {
networkConnection.Close()
c.clientMutex.Unlock()
return err
}
c.grpcConn = connection
c.grpcClient = client
c.grpcConn = grpcConn
c.grpcClient = daemon.NewStartedServiceClient(grpcConn)
c.ctx, c.cancel = context.WithCancel(context.Background())
c.clientMutex.Unlock()
c.handler.Connected()
return c.dispatchCommands()
}
func (c *CommandClient) dispatchCommands() error {
for _, command := range c.options.commands {
switch command {
case CommandLog:
@@ -274,41 +281,57 @@ func (c *CommandClient) getClientForCall() (daemon.StartedServiceClient, error)
return c.grpcClient, nil
}
target, contextDialer := dialTarget()
connection, client, err := c.dialWithRetry(target, contextDialer, true)
if sXPCDialer != nil {
fd, err := sXPCDialer.DialXPC()
if err != nil {
return nil, err
}
file := os.NewFile(uintptr(fd), "xpc-command-socket")
if file == nil {
return nil, E.New("invalid file descriptor")
}
netConn, err := net.FileConn(file)
if err != nil {
file.Close()
return nil, E.Cause(err, "create connection from fd")
}
file.Close()
clientOptions := []grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
return netConn, nil
}),
grpc.WithUnaryInterceptor(unaryClientAuthInterceptor),
grpc.WithStreamInterceptor(streamClientAuthInterceptor),
}
grpcConn, err := grpc.NewClient("passthrough:///xpc", clientOptions...)
if err != nil {
netConn.Close()
return nil, err
}
c.grpcConn = grpcConn
c.grpcClient = daemon.NewStartedServiceClient(grpcConn)
if c.ctx == nil {
c.ctx, c.cancel = context.WithCancel(context.Background())
}
return c.grpcClient, nil
}
conn, err := c.grpcDial()
if err != nil {
return nil, err
}
c.grpcConn = connection
c.grpcClient = client
c.grpcConn = conn
c.grpcClient = daemon.NewStartedServiceClient(conn)
if c.ctx == nil {
c.ctx, c.cancel = context.WithCancel(context.Background())
}
return c.grpcClient, nil
}
func (c *CommandClient) closeConnection() {
c.clientMutex.Lock()
defer c.clientMutex.Unlock()
if c.grpcConn != nil {
c.grpcConn.Close()
c.grpcConn = nil
c.grpcClient = nil
}
}
func callWithResult[T any](c *CommandClient, call func(client daemon.StartedServiceClient) (T, error)) (T, error) {
client, err := c.getClientForCall()
if err != nil {
var zero T
return zero, err
}
if c.standalone {
defer c.closeConnection()
}
return call(client)
}
func (c *CommandClient) getStreamContext() (daemon.StartedServiceClient, context.Context) {
c.clientMutex.RLock()
defer c.clientMutex.RUnlock()
@@ -366,7 +389,7 @@ func (c *CommandClient) handleStatusStream() {
c.handler.Disconnected(err.Error())
return
}
c.handler.WriteStatus(statusMessageFromGRPC(status))
c.handler.WriteStatus(StatusMessageFromGRPC(status))
}
}
@@ -385,7 +408,7 @@ func (c *CommandClient) handleGroupStream() {
c.handler.Disconnected(err.Error())
return
}
c.handler.WriteGroups(outboundGroupIteratorFromGRPC(groups))
c.handler.WriteGroups(OutboundGroupIteratorFromGRPC(groups))
}
}
@@ -445,134 +468,175 @@ func (c *CommandClient) handleConnectionsStream() {
return
}
var connections Connections
for {
events, err := stream.Recv()
conns, err := stream.Recv()
if err != nil {
c.handler.Disconnected(err.Error())
return
}
libboxEvents := connectionEventsFromGRPC(events)
c.handler.WriteConnectionEvents(libboxEvents)
connections.input = ConnectionsFromGRPC(conns)
c.handler.WriteConnections(&connections)
}
}
func (c *CommandClient) SelectOutbound(groupTag string, outboundTag string) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.SelectOutbound(context.Background(), &daemon.SelectOutboundRequest{
GroupTag: groupTag,
OutboundTag: outboundTag,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.SelectOutbound(context.Background(), &daemon.SelectOutboundRequest{
GroupTag: groupTag,
OutboundTag: outboundTag,
})
return err
}
func (c *CommandClient) URLTest(groupTag string) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.URLTest(context.Background(), &daemon.URLTestRequest{
OutboundTag: groupTag,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.URLTest(context.Background(), &daemon.URLTestRequest{
OutboundTag: groupTag,
})
return err
}
func (c *CommandClient) SetClashMode(newMode string) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.SetClashMode(context.Background(), &daemon.ClashMode{
Mode: newMode,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.SetClashMode(context.Background(), &daemon.ClashMode{
Mode: newMode,
})
return err
}
func (c *CommandClient) CloseConnection(connId string) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.CloseConnection(context.Background(), &daemon.CloseConnectionRequest{
Id: connId,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.CloseConnection(context.Background(), &daemon.CloseConnectionRequest{
Id: connId,
})
return err
}
func (c *CommandClient) CloseConnections() error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.CloseAllConnections(context.Background(), &emptypb.Empty{})
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.CloseAllConnections(context.Background(), &emptypb.Empty{})
return err
}
func (c *CommandClient) ServiceReload() error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.ReloadService(context.Background(), &emptypb.Empty{})
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.ReloadService(context.Background(), &emptypb.Empty{})
return err
}
func (c *CommandClient) ServiceClose() error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.StopService(context.Background(), &emptypb.Empty{})
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.StopService(context.Background(), &emptypb.Empty{})
return err
}
func (c *CommandClient) ClearLogs() error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.ClearLogs(context.Background(), &emptypb.Empty{})
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.ClearLogs(context.Background(), &emptypb.Empty{})
return err
}
func (c *CommandClient) GetSystemProxyStatus() (*SystemProxyStatus, error) {
return callWithResult(c, func(client daemon.StartedServiceClient) (*SystemProxyStatus, error) {
status, err := client.GetSystemProxyStatus(context.Background(), &emptypb.Empty{})
if err != nil {
return nil, err
}
return systemProxyStatusFromGRPC(status), nil
})
client, err := c.getClientForCall()
if err != nil {
return nil, err
}
status, err := client.GetSystemProxyStatus(context.Background(), &emptypb.Empty{})
if err != nil {
return nil, err
}
return SystemProxyStatusFromGRPC(status), nil
}
func (c *CommandClient) SetSystemProxyEnabled(isEnabled bool) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.SetSystemProxyEnabled(context.Background(), &daemon.SetSystemProxyEnabledRequest{
Enabled: isEnabled,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.SetSystemProxyEnabled(context.Background(), &daemon.SetSystemProxyEnabledRequest{
Enabled: isEnabled,
})
return err
}
func (c *CommandClient) GetDeprecatedNotes() (DeprecatedNoteIterator, error) {
return callWithResult(c, func(client daemon.StartedServiceClient) (DeprecatedNoteIterator, error) {
warnings, err := client.GetDeprecatedWarnings(context.Background(), &emptypb.Empty{})
if err != nil {
return nil, err
}
var notes []*DeprecatedNote
for _, warning := range warnings.Warnings {
notes = append(notes, &DeprecatedNote{
Description: warning.Message,
MigrationLink: warning.MigrationLink,
})
}
return newIterator(notes), nil
})
client, err := c.getClientForCall()
if err != nil {
return nil, err
}
warnings, err := client.GetDeprecatedWarnings(context.Background(), &emptypb.Empty{})
if err != nil {
return nil, err
}
var notes []*DeprecatedNote
for _, warning := range warnings.Warnings {
notes = append(notes, &DeprecatedNote{
Description: warning.Message,
MigrationLink: warning.MigrationLink,
})
}
return newIterator(notes), nil
}
func (c *CommandClient) GetStartedAt() (int64, error) {
return callWithResult(c, func(client daemon.StartedServiceClient) (int64, error) {
startedAt, err := client.GetStartedAt(context.Background(), &emptypb.Empty{})
if err != nil {
return 0, err
}
return startedAt.StartedAt, nil
})
client, err := c.getClientForCall()
if err != nil {
return 0, err
}
startedAt, err := client.GetStartedAt(context.Background(), &emptypb.Empty{})
if err != nil {
return 0, err
}
return startedAt.StartedAt, nil
}
func (c *CommandClient) SetGroupExpand(groupTag string, isExpand bool) error {
_, err := callWithResult(c, func(client daemon.StartedServiceClient) (*emptypb.Empty, error) {
return client.SetGroupExpand(context.Background(), &daemon.SetGroupExpandRequest{
GroupTag: groupTag,
IsExpand: isExpand,
})
client, err := c.getClientForCall()
if err != nil {
return err
}
_, err = client.SetGroupExpand(context.Background(), &daemon.SetGroupExpandRequest{
GroupTag: groupTag,
IsExpand: isExpand,
})
return err
}

View File

@@ -43,7 +43,7 @@ type CommandServerHandler interface {
}
func NewCommandServer(handler CommandServerHandler, platformInterface PlatformInterface) (*CommandServer, error) {
ctx := baseContext(platformInterface)
ctx := BaseContext(platformInterface)
platformWrapper := &platformInterfaceWrapper{
iif: platformInterface,
useProcFS: platformInterface.UseProcFS(),
@@ -60,7 +60,6 @@ func NewCommandServer(handler CommandServerHandler, platformInterface PlatformIn
Handler: (*platformHandler)(server),
Debug: sDebug,
LogMaxLines: sLogMaxLines,
OOMKiller: memoryLimitEnabled,
// WorkingDirectory: sWorkingPath,
// TempDirectory: sTempPath,
// UserID: sUserID,
@@ -160,7 +159,6 @@ func (s *CommandServer) Close() {
s.grpcServer.Stop()
}
common.Close(s.listener)
s.StartedService.Close()
}
type OverrideOptions struct {

View File

@@ -3,7 +3,6 @@ package libbox
import (
"slices"
"strings"
"time"
"github.com/sagernet/sing-box/daemon"
M "github.com/sagernet/sing/common/metadata"
@@ -32,11 +31,11 @@ type OutboundGroup struct {
Selectable bool
Selected string
IsExpand bool
itemList []*OutboundGroupItem
ItemList []*OutboundGroupItem
}
func (g *OutboundGroup) GetItems() OutboundGroupItemIterator {
return newIterator(g.itemList)
return newIterator(g.ItemList)
}
type OutboundGroupIterator interface {
@@ -62,119 +61,12 @@ const (
ConnectionStateClosed
)
const (
ConnectionEventNew = iota
ConnectionEventUpdate
ConnectionEventClosed
)
const (
closedConnectionMaxAge = int64((5 * time.Minute) / time.Millisecond)
)
type ConnectionEvent struct {
Type int32
ID string
Connection *Connection
UplinkDelta int64
DownlinkDelta int64
ClosedAt int64
}
type ConnectionEvents struct {
Reset bool
events []*ConnectionEvent
}
func (c *ConnectionEvents) Iterator() ConnectionEventIterator {
return newIterator(c.events)
}
type ConnectionEventIterator interface {
Next() *ConnectionEvent
HasNext() bool
}
type Connections struct {
connectionMap map[string]*Connection
input []Connection
filtered []Connection
filterState int32
filterApplied bool
}
func NewConnections() *Connections {
return &Connections{
connectionMap: make(map[string]*Connection),
}
}
func (c *Connections) ApplyEvents(events *ConnectionEvents) {
if events == nil {
return
}
if events.Reset {
c.connectionMap = make(map[string]*Connection)
}
for _, event := range events.events {
switch event.Type {
case ConnectionEventNew:
if event.Connection != nil {
conn := *event.Connection
c.connectionMap[event.ID] = &conn
}
case ConnectionEventUpdate:
if conn, ok := c.connectionMap[event.ID]; ok {
conn.Uplink = event.UplinkDelta
conn.Downlink = event.DownlinkDelta
conn.UplinkTotal += event.UplinkDelta
conn.DownlinkTotal += event.DownlinkDelta
}
case ConnectionEventClosed:
if event.Connection != nil {
conn := *event.Connection
conn.ClosedAt = event.ClosedAt
conn.Uplink = 0
conn.Downlink = 0
c.connectionMap[event.ID] = &conn
continue
}
if conn, ok := c.connectionMap[event.ID]; ok {
conn.ClosedAt = event.ClosedAt
conn.Uplink = 0
conn.Downlink = 0
}
}
}
c.evictClosedConnections(time.Now().UnixMilli())
c.input = c.input[:0]
for _, conn := range c.connectionMap {
c.input = append(c.input, *conn)
}
if c.filterApplied {
c.FilterState(c.filterState)
} else {
c.filtered = c.filtered[:0]
c.filtered = append(c.filtered, c.input...)
}
}
func (c *Connections) evictClosedConnections(nowMilliseconds int64) {
for id, conn := range c.connectionMap {
if conn.ClosedAt == 0 {
continue
}
if nowMilliseconds-conn.ClosedAt > closedConnectionMaxAge {
delete(c.connectionMap, id)
}
}
input []Connection
filtered []Connection
}
func (c *Connections) FilterState(state int32) {
c.filterApplied = true
c.filterState = state
c.filtered = c.filtered[:0]
switch state {
case ConnectionStateAll:
@@ -267,12 +159,12 @@ type Connection struct {
Rule string
Outbound string
OutboundType string
chainList []string
ChainList []string
ProcessInfo *ProcessInfo
}
func (c *Connection) Chain() StringIterator {
return newIterator(c.chainList)
return newIterator(c.ChainList)
}
func (c *Connection) DisplayDestination() string {
@@ -292,7 +184,7 @@ type ConnectionIterator interface {
HasNext() bool
}
func statusMessageFromGRPC(status *daemon.Status) *StatusMessage {
func StatusMessageFromGRPC(status *daemon.Status) *StatusMessage {
if status == nil {
return nil
}
@@ -309,7 +201,7 @@ func statusMessageFromGRPC(status *daemon.Status) *StatusMessage {
}
}
func outboundGroupIteratorFromGRPC(groups *daemon.Groups) OutboundGroupIterator {
func OutboundGroupIteratorFromGRPC(groups *daemon.Groups) OutboundGroupIterator {
if groups == nil || len(groups.Group) == 0 {
return newIterator([]*OutboundGroup{})
}
@@ -323,7 +215,7 @@ func outboundGroupIteratorFromGRPC(groups *daemon.Groups) OutboundGroupIterator
IsExpand: g.IsExpand,
}
for _, item := range g.Items {
libboxGroup.itemList = append(libboxGroup.itemList, &OutboundGroupItem{
libboxGroup.ItemList = append(libboxGroup.ItemList, &OutboundGroupItem{
Tag: item.Tag,
Type: item.Type,
URLTestTime: item.UrlTestTime,
@@ -335,7 +227,7 @@ func outboundGroupIteratorFromGRPC(groups *daemon.Groups) OutboundGroupIterator
return newIterator(libboxGroups)
}
func connectionFromGRPC(conn *daemon.Connection) Connection {
func ConnectionFromGRPC(conn *daemon.Connection) Connection {
var processInfo *ProcessInfo
if conn.ProcessInfo != nil {
processInfo = &ProcessInfo{
@@ -367,45 +259,23 @@ func connectionFromGRPC(conn *daemon.Connection) Connection {
Rule: conn.Rule,
Outbound: conn.Outbound,
OutboundType: conn.OutboundType,
chainList: conn.ChainList,
ChainList: conn.ChainList,
ProcessInfo: processInfo,
}
}
func connectionEventFromGRPC(event *daemon.ConnectionEvent) *ConnectionEvent {
if event == nil {
func ConnectionsFromGRPC(connections *daemon.Connections) []Connection {
if connections == nil || len(connections.Connections) == 0 {
return nil
}
libboxEvent := &ConnectionEvent{
Type: int32(event.Type),
ID: event.Id,
UplinkDelta: event.UplinkDelta,
DownlinkDelta: event.DownlinkDelta,
ClosedAt: event.ClosedAt,
var libboxConnections []Connection
for _, conn := range connections.Connections {
libboxConnections = append(libboxConnections, ConnectionFromGRPC(conn))
}
if event.Connection != nil {
conn := connectionFromGRPC(event.Connection)
libboxEvent.Connection = &conn
}
return libboxEvent
return libboxConnections
}
func connectionEventsFromGRPC(events *daemon.ConnectionEvents) *ConnectionEvents {
if events == nil {
return nil
}
libboxEvents := &ConnectionEvents{
Reset: events.Reset_,
}
for _, event := range events.Events {
if libboxEvent := connectionEventFromGRPC(event); libboxEvent != nil {
libboxEvents.events = append(libboxEvents.events, libboxEvent)
}
}
return libboxEvents
}
func systemProxyStatusFromGRPC(status *daemon.SystemProxyStatus) *SystemProxyStatus {
func SystemProxyStatusFromGRPC(status *daemon.SystemProxyStatus) *SystemProxyStatus {
if status == nil {
return nil
}
@@ -415,7 +285,7 @@ func systemProxyStatusFromGRPC(status *daemon.SystemProxyStatus) *SystemProxySta
}
}
func systemProxyStatusToGRPC(status *SystemProxyStatus) *daemon.SystemProxyStatus {
func SystemProxyStatusToGRPC(status *SystemProxyStatus) *daemon.SystemProxyStatus {
if status == nil {
return nil
}

View File

@@ -22,7 +22,7 @@ import (
"github.com/sagernet/sing/service/filemanager"
)
func baseContext(platformInterface PlatformInterface) context.Context {
func BaseContext(platformInterface PlatformInterface) context.Context {
dnsRegistry := include.DNSTransportRegistry()
if platformInterface != nil {
if localTransport := platformInterface.LocalDNSTransport(); localTransport != nil {
@@ -45,7 +45,7 @@ func parseConfig(ctx context.Context, configContent string) (option.Options, err
}
func CheckConfig(configContent string) error {
ctx := baseContext(nil)
ctx := BaseContext(nil)
options, err := parseConfig(ctx, configContent)
if err != nil {
return err
@@ -189,7 +189,7 @@ func (s *interfaceMonitorStub) MyInterface() string {
}
func FormatConfig(configContent string) (*StringBox, error) {
options, err := parseConfig(baseContext(nil), configContent)
options, err := parseConfig(BaseContext(nil), configContent)
if err != nil {
return nil, err
}

View File

@@ -1,257 +0,0 @@
{
"version": 1,
"variables": {
"VERSION": "$(go run github.com/sagernet/sing-box/cmd/internal/read_tag@latest)",
"WORKSPACE_ROOT": "../../..",
"DEPLOY_ANDROID": "${WORKSPACE_ROOT}/sing-box-for-android/app/libs",
"DEPLOY_APPLE": "${WORKSPACE_ROOT}/sing-box-for-apple",
"DEPLOY_WINDOWS": "${WORKSPACE_ROOT}/sing-box-for-windows/local-packages"
},
"packages": [
{
"id": "libbox",
"path": ".",
"java_package": "io.nekohasekai.libbox",
"csharp_namespace": "SagerNet",
"csharp_entrypoint": "Libbox",
"apple_prefix": "Libbox"
}
],
"builds": [
{
"id": "android-main",
"packages": ["libbox"],
"default": {
"tags": [
"with_gvisor",
"with_quic",
"with_wireguard",
"with_utls",
"with_naive_outbound",
"with_clash_api",
"badlinkname",
"tfogo_checklinkname0",
"with_tailscale",
"ts_omit_logtail",
"ts_omit_ssh",
"ts_omit_drive",
"ts_omit_taildrop",
"ts_omit_webclient",
"ts_omit_doctor",
"ts_omit_capture",
"ts_omit_kube",
"ts_omit_aws",
"ts_omit_synology",
"ts_omit_bird"
],
"ldflags": "-X github.com/sagernet/sing-box/constant.Version=${VERSION} -X internal/godebug.defaultGODEBUG=multipathtcp=0 -s -w -buildid= -checklinkname=0",
"trimpath": true
}
},
{
"id": "android-legacy",
"packages": ["libbox"],
"default": {
"tags": [
"with_gvisor",
"with_quic",
"with_wireguard",
"with_utls",
"with_clash_api",
"badlinkname",
"tfogo_checklinkname0",
"with_tailscale",
"ts_omit_logtail",
"ts_omit_ssh",
"ts_omit_drive",
"ts_omit_taildrop",
"ts_omit_webclient",
"ts_omit_doctor",
"ts_omit_capture",
"ts_omit_kube",
"ts_omit_aws",
"ts_omit_synology",
"ts_omit_bird"
],
"ldflags": "-X github.com/sagernet/sing-box/constant.Version=${VERSION} -X internal/godebug.defaultGODEBUG=multipathtcp=0 -s -w -buildid= -checklinkname=0",
"trimpath": true
}
},
{
"id": "apple",
"packages": ["libbox"],
"default": {
"tags": [
"with_gvisor",
"with_quic",
"with_wireguard",
"with_utls",
"with_naive_outbound",
"with_clash_api",
"badlinkname",
"tfogo_checklinkname0",
"with_dhcp",
"grpcnotrace",
"with_tailscale",
"ts_omit_logtail",
"ts_omit_ssh",
"ts_omit_drive",
"ts_omit_taildrop",
"ts_omit_webclient",
"ts_omit_doctor",
"ts_omit_capture",
"ts_omit_kube",
"ts_omit_aws",
"ts_omit_synology",
"ts_omit_bird"
],
"ldflags": "-X github.com/sagernet/sing-box/constant.Version=${VERSION} -X internal/godebug.defaultGODEBUG=multipathtcp=0 -s -w -buildid= -checklinkname=0",
"trimpath": true
},
"overrides": [
{
"match": { "os": "ios" },
"tags_append": ["with_low_memory"]
},
{
"match": { "os": "tvos" },
"tags_append": ["with_low_memory"]
}
]
},
{
"id": "windows",
"packages": ["libbox"],
"default": {
"tags": [
"with_gvisor",
"with_quic",
"with_wireguard",
"with_utls",
"with_naive_outbound",
"with_purego",
"with_clash_api",
"badlinkname",
"tfogo_checklinkname0",
"with_tailscale",
"ts_omit_logtail",
"ts_omit_ssh",
"ts_omit_drive",
"ts_omit_taildrop",
"ts_omit_webclient",
"ts_omit_doctor",
"ts_omit_capture",
"ts_omit_kube",
"ts_omit_aws",
"ts_omit_synology",
"ts_omit_bird"
],
"ldflags": "-X github.com/sagernet/sing-box/constant.Version=${VERSION} -X internal/godebug.defaultGODEBUG=multipathtcp=0 -s -w -buildid= -checklinkname=0",
"trimpath": true
}
}
],
"platforms": [
{
"type": "android",
"build": "android-main",
"min_sdk": 23,
"ndk_version": "28.0.13004108",
"lib_name": "box",
"languages": [{ "type": "java" }],
"artifacts": [
{
"type": "aar",
"output_path": "libbox.aar",
"execute_after": [
"if [ -d \"${DEPLOY_ANDROID}\" ]; then",
" rm -f \"${DEPLOY_ANDROID}/$$(basename \"${OUTPUT_PATH}\")\"",
" mv \"${OUTPUT_PATH}\" \"${DEPLOY_ANDROID}/\"",
"fi"
]
}
]
},
{
"type": "android",
"build": "android-legacy",
"min_sdk": 21,
"ndk_version": "28.0.13004108",
"lib_name": "box",
"languages": [{ "type": "java" }],
"artifacts": [
{
"type": "aar",
"output_path": "libbox-legacy.aar",
"execute_after": [
"if [ -d \"${DEPLOY_ANDROID}\" ]; then",
" rm -f \"${DEPLOY_ANDROID}/$$(basename \"${OUTPUT_PATH}\")\"",
" mv \"${OUTPUT_PATH}\" \"${DEPLOY_ANDROID}/\"",
"fi"
]
}
]
},
{
"type": "apple",
"build": "apple",
"targets": [
"ios/arm64",
"ios/simulator/arm64",
"ios/simulator/amd64",
"tvos/arm64",
"tvos/simulator/arm64",
"tvos/simulator/amd64",
"macos/arm64",
"macos/amd64"
],
"languages": [{ "type": "objc" }],
"artifacts": [
{
"type": "xcframework",
"module_name": "Libbox",
"execute_after": [
"if [ -d \"${DEPLOY_APPLE}\" ]; then",
" rm -rf \"${DEPLOY_APPLE}/${MODULE_NAME}.xcframework\"",
" mv \"${OUTPUT_PATH}\" \"${DEPLOY_APPLE}/\"",
"fi"
]
}
]
},
{
"type": "csharp",
"build": "windows",
"targets": [
"windows/amd64"
],
"languages": [{ "type": "csharp" }],
"artifacts": [
{
"type": "nuget",
"package_id": "SagerNet.Libbox",
"package_version": "0.0.0-local",
"execute_after": {
"windows": [
"$$deployPath = '${DEPLOY_WINDOWS}'",
"if (Test-Path $$deployPath) {",
" Remove-Item \"$$deployPath\\${PACKAGE_ID}.*.nupkg\" -ErrorAction SilentlyContinue",
" Move-Item -Force '${OUTPUT_PATH}' \"$$deployPath\\\"",
" $$cachePath = if ($$env:NUGET_PACKAGES) { $$env:NUGET_PACKAGES } else { \"$$env:USERPROFILE\\.nuget\\packages\" }",
" Remove-Item -Recurse -Force \"$$cachePath\\sagernet.libbox\\${PACKAGE_VERSION}\" -ErrorAction SilentlyContinue",
"}"
],
"default": [
"if [ -d \"${DEPLOY_WINDOWS}\" ]; then",
" rm -f \"${DEPLOY_WINDOWS}/${PACKAGE_ID}.*.nupkg\"",
" mv \"${OUTPUT_PATH}\" \"${DEPLOY_WINDOWS}/\"",
" cache_path=\"$${NUGET_PACKAGES:-$${HOME}/.nuget/packages}\"",
" rm -rf \"$${cache_path}/sagernet.libbox/${PACKAGE_VERSION}\"",
"fi"
]
}
}
]
}
]
}

View File

@@ -4,23 +4,20 @@ import (
"math"
runtimeDebug "runtime/debug"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/common/conntrack"
)
var memoryLimitEnabled bool
func SetMemoryLimit(enabled bool) {
memoryLimitEnabled = enabled
const memoryLimitGo = 45 * 1024 * 1024
const memoryLimit = 45 * 1024 * 1024
const memoryLimitGo = memoryLimit / 1.5
if enabled {
runtimeDebug.SetGCPercent(10)
if C.IsIos {
runtimeDebug.SetMemoryLimit(memoryLimitGo)
}
runtimeDebug.SetMemoryLimit(memoryLimitGo)
conntrack.KillerEnabled = true
conntrack.MemoryLimit = memoryLimit
} else {
runtimeDebug.SetGCPercent(100)
if C.IsIos {
runtimeDebug.SetMemoryLimit(math.MaxInt64)
}
runtimeDebug.SetMemoryLimit(math.MaxInt64)
conntrack.KillerEnabled = false
}
}

View File

@@ -5,7 +5,6 @@ import (
"bytes"
"compress/gzip"
"encoding/binary"
"io"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/varbin"
@@ -36,7 +35,7 @@ type ErrorMessage struct {
func (e *ErrorMessage) Encode() []byte {
var buffer bytes.Buffer
buffer.WriteByte(MessageTypeError)
writeString(&buffer, e.Message)
varbin.Write(&buffer, binary.BigEndian, e.Message)
return buffer.Bytes()
}
@@ -50,7 +49,7 @@ func DecodeErrorMessage(data []byte) (*ErrorMessage, error) {
return nil, E.New("invalid message")
}
var message ErrorMessage
message.Message, err = readString(reader)
message.Message, err = varbin.ReadValue[string](reader, binary.BigEndian)
if err != nil {
return nil, err
}
@@ -88,7 +87,7 @@ func (e *ProfileEncoder) Encode() []byte {
binary.Write(&buffer, binary.BigEndian, uint16(len(e.profiles)))
for _, preview := range e.profiles {
binary.Write(&buffer, binary.BigEndian, preview.ProfileID)
writeString(&buffer, preview.Name)
varbin.Write(&buffer, binary.BigEndian, preview.Name)
binary.Write(&buffer, binary.BigEndian, preview.Type)
}
return buffer.Bytes()
@@ -118,7 +117,7 @@ func (d *ProfileDecoder) Decode(data []byte) error {
if err != nil {
return err
}
profile.Name, err = readString(reader)
profile.Name, err = varbin.ReadValue[string](reader, binary.BigEndian)
if err != nil {
return err
}
@@ -179,11 +178,11 @@ func (c *ProfileContent) Encode() []byte {
buffer.WriteByte(1)
gWriter := gzip.NewWriter(buffer)
writer := bufio.NewWriter(gWriter)
writeStringBuffered(writer, c.Name)
varbin.Write(writer, binary.BigEndian, c.Name)
binary.Write(writer, binary.BigEndian, c.Type)
writeStringBuffered(writer, c.Config)
varbin.Write(writer, binary.BigEndian, c.Config)
if c.Type != ProfileTypeLocal {
writeStringBuffered(writer, c.RemotePath)
varbin.Write(writer, binary.BigEndian, c.RemotePath)
}
if c.Type == ProfileTypeRemote {
binary.Write(writer, binary.BigEndian, c.AutoUpdate)
@@ -215,7 +214,7 @@ func DecodeProfileContent(data []byte) (*ProfileContent, error) {
}
bReader := varbin.StubReader(gReader)
var content ProfileContent
content.Name, err = readString(bReader)
content.Name, err = varbin.ReadValue[string](bReader, binary.BigEndian)
if err != nil {
return nil, err
}
@@ -223,12 +222,12 @@ func DecodeProfileContent(data []byte) (*ProfileContent, error) {
if err != nil {
return nil, err
}
content.Config, err = readString(bReader)
content.Config, err = varbin.ReadValue[string](bReader, binary.BigEndian)
if err != nil {
return nil, err
}
if content.Type != ProfileTypeLocal {
content.RemotePath, err = readString(bReader)
content.RemotePath, err = varbin.ReadValue[string](bReader, binary.BigEndian)
if err != nil {
return nil, err
}
@@ -251,28 +250,3 @@ func DecodeProfileContent(data []byte) (*ProfileContent, error) {
}
return &content, nil
}
func readString(reader io.ByteReader) (string, error) {
length, err := binary.ReadUvarint(reader)
if err != nil {
return "", err
}
buf := make([]byte, length)
for i := range buf {
buf[i], err = reader.ReadByte()
if err != nil {
return "", err
}
}
return string(buf), nil
}
func writeString(buffer *bytes.Buffer, value string) {
varbin.WriteUvarint(buffer, uint64(len(value)))
buffer.WriteString(value)
}
func writeStringBuffered(writer *bufio.Writer, value string) {
varbin.WriteUvarint(writer, uint64(len(value)))
writer.WriteString(value)
}

View File

@@ -1,27 +0,0 @@
package libbox
import (
"strings"
"golang.org/x/mod/semver"
)
func CompareSemver(left string, right string) bool {
normalizedLeft := normalizeSemver(left)
if !semver.IsValid(normalizedLeft) {
return false
}
normalizedRight := normalizeSemver(right)
if !semver.IsValid(normalizedRight) {
return false
}
return semver.Compare(normalizedLeft, normalizedRight) > 0
}
func normalizeSemver(version string) string {
trimmedVersion := strings.TrimSpace(version)
if strings.HasPrefix(trimmedVersion, "v") {
return trimmedVersion
}
return "v" + trimmedVersion
}

View File

@@ -1,16 +0,0 @@
package libbox
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestCompareSemver(t *testing.T) {
t.Parallel()
require.False(t, CompareSemver("1.13.0-rc.4", "1.13.0"))
require.True(t, CompareSemver("1.13.1", "1.13.0"))
require.False(t, CompareSemver("v1.13.0", "1.13.0"))
require.False(t, CompareSemver("1.13.0-", "1.13.0"))
}

View File

@@ -126,9 +126,6 @@ func (w *platformInterfaceWrapper) NetworkInterfaces() ([]adapter.NetworkInterfa
Constrained: isDefault && w.isConstrained,
})
}
interfaces = common.UniqBy(interfaces, func(it adapter.NetworkInterface) string {
return it.Name
})
return interfaces, nil
}

View File

@@ -84,15 +84,15 @@ type StatsServiceServer interface {
type UnimplementedStatsServiceServer struct{}
func (UnimplementedStatsServiceServer) GetStats(context.Context, *GetStatsRequest) (*GetStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "method GetStats not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetStats not implemented")
}
func (UnimplementedStatsServiceServer) QueryStats(context.Context, *QueryStatsRequest) (*QueryStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "method QueryStats not implemented")
return nil, status.Errorf(codes.Unimplemented, "method QueryStats not implemented")
}
func (UnimplementedStatsServiceServer) GetSysStats(context.Context, *SysStatsRequest) (*SysStatsResponse, error) {
return nil, status.Error(codes.Unimplemented, "method GetSysStats not implemented")
return nil, status.Errorf(codes.Unimplemented, "method GetSysStats not implemented")
}
func (UnimplementedStatsServiceServer) mustEmbedUnimplementedStatsServiceServer() {}
func (UnimplementedStatsServiceServer) testEmbeddedByValue() {}
@@ -105,7 +105,7 @@ type UnsafeStatsServiceServer interface {
}
func RegisterStatsServiceServer(s grpc.ServiceRegistrar, srv StatsServiceServer) {
// If the following call panics, it indicates UnimplementedStatsServiceServer was
// If the following call pancis, it indicates UnimplementedStatsServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.

82
go.mod
View File

@@ -3,47 +3,46 @@ module github.com/sagernet/sing-box
go 1.24.7
require (
github.com/anthropics/anthropic-sdk-go v1.26.0
github.com/anthropics/anthropic-sdk-go v1.19.0
github.com/anytls/sing-anytls v0.0.11
github.com/caddyserver/certmagic v0.25.0
github.com/coder/websocket v1.8.14
github.com/cretz/bine v0.2.0
github.com/database64128/tfo-go/v2 v2.3.2
github.com/database64128/tfo-go/v2 v2.3.1
github.com/go-chi/chi/v5 v5.2.3
github.com/go-chi/render v1.0.3
github.com/godbus/dbus/v5 v5.2.1
github.com/gofrs/uuid/v5 v5.4.0
github.com/insomniacslk/dhcp v0.0.0-20251020182700-175e84fbb167
github.com/keybase/go-keychain v0.0.1
github.com/libdns/acmedns v0.5.0
github.com/libdns/alidns v1.0.6-beta.3
github.com/libdns/cloudflare v0.2.2
github.com/logrusorgru/aurora v2.0.3+incompatible
github.com/metacubex/utls v1.8.4
github.com/metacubex/utls v1.8.3
github.com/mholt/acmez/v3 v3.1.4
github.com/miekg/dns v1.1.69
github.com/openai/openai-go/v3 v3.23.0
github.com/openai/openai-go/v3 v3.15.0
github.com/oschwald/maxminddb-golang v1.13.1
github.com/sagernet/asc-go v0.0.0-20241217030726-d563060fe4e1
github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a
github.com/sagernet/cors v1.2.1
github.com/sagernet/cronet-go v0.0.0-20260227112944-17c7ef18afa6
github.com/sagernet/cronet-go/all v0.0.0-20260227112944-17c7ef18afa6
github.com/sagernet/cronet-go v0.0.0-20251231120443-1d2d7341cbd8
github.com/sagernet/cronet-go/all v0.0.0-20251231120443-1d2d7341cbd8
github.com/sagernet/fswatch v0.1.1
github.com/sagernet/gomobile v0.1.12
github.com/sagernet/gomobile v0.1.11
github.com/sagernet/gvisor v0.0.0-20250811.0-sing-box-mod.1
github.com/sagernet/quic-go v0.59.0-sing-box-mod.4
github.com/sagernet/sing v0.8.0-beta.16.0.20260227013657-e419e9875a07
github.com/sagernet/sing-mux v0.3.4
github.com/sagernet/sing-quic v0.6.0-beta.13
github.com/sagernet/quic-go v0.58.0-sing-box-mod.1
github.com/sagernet/sing v0.8.0-beta.9.0.20260109141034-50e1ed36c6f6
github.com/sagernet/sing-mux v0.3.3
github.com/sagernet/sing-quic v0.6.0-beta.8
github.com/sagernet/sing-shadowsocks v0.2.8
github.com/sagernet/sing-shadowsocks2 v0.2.1
github.com/sagernet/sing-shadowtls v0.2.1-0.20250503051639-fcd445d33c11
github.com/sagernet/sing-tun v0.8.0-beta.18
github.com/sagernet/sing-tun v0.8.0-beta.11.0.20260107060547-525f783d005b
github.com/sagernet/sing-vmess v0.2.8-0.20250909125414-3aed155119a1
github.com/sagernet/smux v1.5.50-sing-box-mod.1
github.com/sagernet/smux v1.5.34-mod.2
github.com/sagernet/tailscale v1.92.4-sing-box-1.13-mod.6
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20260224074747-506b7631853c
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20250917110311-16510ac47288
github.com/sagernet/ws v0.0.0-20231204124109-acfe8907c854
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
@@ -54,7 +53,7 @@ require (
golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93
golang.org/x/mod v0.31.0
golang.org/x/net v0.48.0
golang.org/x/sys v0.41.0
golang.org/x/sys v0.39.0
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
google.golang.org/grpc v1.77.0
google.golang.org/protobuf v1.36.11
@@ -105,35 +104,28 @@ require (
github.com/prometheus-community/pro-bing v0.4.0 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_loong64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_loong64_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_mips64le v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_mipsle v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_mipsle_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_riscv64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/linux_riscv64_musl v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20260227112350-bf468eec914d // indirect
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20251231115934-b87a9ae9bd80 // indirect
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a // indirect
github.com/sagernet/nftables v0.3.0-beta.4 // indirect
github.com/spf13/pflag v1.0.9 // indirect

178
go.sum
View File

@@ -8,8 +8,8 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/anthropics/anthropic-sdk-go v1.26.0 h1:oUTzFaUpAevfuELAP1sjL6CQJ9HHAfT7CoSYSac11PY=
github.com/anthropics/anthropic-sdk-go v1.26.0/go.mod h1:qUKmaW+uuPB64iy1l+4kOSvaLqPXnHTTBKH6RVZ7q5Q=
github.com/anthropics/anthropic-sdk-go v1.19.0 h1:mO6E+ffSzLRvR/YUH9KJC0uGw0uV8GjISIuzem//3KE=
github.com/anthropics/anthropic-sdk-go v1.19.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
github.com/anytls/sing-anytls v0.0.11 h1:w8e9Uj1oP3m4zxkyZDewPk0EcQbvVxb7Nn+rapEx4fc=
github.com/anytls/sing-anytls v0.0.11/go.mod h1:7rjN6IukwysmdusYsrV51Fgu1uW6vsrdd6ctjnEAln8=
github.com/caddyserver/certmagic v0.25.0 h1:VMleO/XA48gEWes5l+Fh6tRWo9bHkhwAEhx63i+F5ic=
@@ -29,17 +29,16 @@ github.com/cretz/bine v0.2.0 h1:8GiDRGlTgz+o8H9DSnsl+5MeBK4HsExxgl6WgzOCuZo=
github.com/cretz/bine v0.2.0/go.mod h1:WU4o9QR9wWp8AVKtTM1XD5vUHkEqnf2vVSo6dBqbetI=
github.com/database64128/netx-go v0.1.1 h1:dT5LG7Gs7zFZBthFBbzWE6K8wAHjSNAaK7wCYZT7NzM=
github.com/database64128/netx-go v0.1.1/go.mod h1:LNlYVipaYkQArRFDNNJ02VkNV+My9A5XR/IGS7sIBQc=
github.com/database64128/tfo-go/v2 v2.3.2 h1:UhZMKiMq3swZGUiETkLBDzQnZBPSAeBMClpJGlnJ5Fw=
github.com/database64128/tfo-go/v2 v2.3.2/go.mod h1:GC3uB5oa4beGpCUbRb2ZOWP73bJJFmMyAVgQSO7r724=
github.com/database64128/tfo-go/v2 v2.3.1 h1:EGE+ELd5/AQ0X6YBlQ9RgKs8+kciNhgN3d8lRvfEJQw=
github.com/database64128/tfo-go/v2 v2.3.1/go.mod h1:k9wcpg/8i5zenspBkc9jUEYehpZZccBnCElzOJB++bU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk=
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU=
github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/florianl/go-nfqueue/v2 v2.0.2 h1:FL5lQTeetgpCvac1TRwSfgaXUn0YSO7WzGvWNIp3JPE=
@@ -104,8 +103,6 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/libdns/acmedns v0.5.0 h1:5pRtmUj4Lb/QkNJSl1xgOGBUJTWW7RjpNaIhjpDXjPE=
github.com/libdns/acmedns v0.5.0/go.mod h1:X7UAFP1Ep9NpTwWpVlrZzJLR7epynAy0wrIxSPFgKjQ=
github.com/libdns/alidns v1.0.6-beta.3 h1:KAmb7FQ1tRzKsaAUGa7ZpGKAMRANwg7+1c7tUbSELq8=
github.com/libdns/alidns v1.0.6-beta.3/go.mod h1:RECwyQ88e9VqQVtSrvX76o1ux3gQUKGzMgxICi+u7Ec=
github.com/libdns/cloudflare v0.2.2 h1:XWHv+C1dDcApqazlh08Q6pjytYLgR2a+Y3xrXFu0vsI=
@@ -118,8 +115,8 @@ github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0
github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o=
github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
github.com/metacubex/utls v1.8.4 h1:HmL9nUApDdWSkgUyodfwF6hSjtiwCGGdyhaSpEejKpg=
github.com/metacubex/utls v1.8.4/go.mod h1:kncGGVhFaoGn5M3pFe3SXhZCzsbCJayNOH4UEqTKTko=
github.com/metacubex/utls v1.8.3 h1:0m/yCxm3SK6kWve2lKiFb1pue1wHitJ8sQQD4Ikqde4=
github.com/metacubex/utls v1.8.3/go.mod h1:kncGGVhFaoGn5M3pFe3SXhZCzsbCJayNOH4UEqTKTko=
github.com/mholt/acmez/v3 v3.1.4 h1:DyzZe/RnAzT3rpZj/2Ii5xZpiEvvYk3cQEN/RmqxwFQ=
github.com/mholt/acmez/v3 v3.1.4/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
github.com/miekg/dns v1.1.69 h1:Kb7Y/1Jo+SG+a2GtfoFUfDkG//csdRPwRLkCsxDG9Sc=
@@ -128,8 +125,8 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/openai/openai-go/v3 v3.23.0 h1:FRFwTcB4FoWFtIunTY/8fgHvzSHgqbfWjiCwOMVrsvw=
github.com/openai/openai-go/v3 v3.23.0/go.mod h1:cdufnVK14cWcT9qA1rRtrXx4FTRsgbDPW7Ia7SS5cZo=
github.com/openai/openai-go/v3 v3.15.0 h1:hk99rM7YPz+M99/5B/zOQcVwFRLLMdprVGx1vaZ8XMo=
github.com/openai/openai-go/v3 v3.15.0/go.mod h1:cdufnVK14cWcT9qA1rRtrXx4FTRsgbDPW7Ia7SS5cZo=
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
@@ -152,102 +149,89 @@ github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a h1:+NkI2670SQpQWvkk
github.com/sagernet/bbolt v0.0.0-20231014093535-ea5cb2fe9f0a/go.mod h1:63s7jpZqcDAIpj8oI/1v4Izok+npJOHACFCU6+huCkM=
github.com/sagernet/cors v1.2.1 h1:Cv5Z8y9YSD6Gm+qSpNrL3LO4lD3eQVvbFYJSG7JCMHQ=
github.com/sagernet/cors v1.2.1/go.mod h1:O64VyOjjhrkLmQIjF4KGRrJO/5dVXFdpEmCW/eISRAI=
github.com/sagernet/cronet-go v0.0.0-20260227112944-17c7ef18afa6 h1:Ato+guxmEL4uezcYV1UUUDpAv9HlcJQ7BZt2zpnzjuw=
github.com/sagernet/cronet-go v0.0.0-20260227112944-17c7ef18afa6/go.mod h1:hwFHBEjjthyEquDULbr4c4ucMedp8Drb6Jvm2kt/0Bw=
github.com/sagernet/cronet-go/all v0.0.0-20260227112944-17c7ef18afa6 h1:0ldSjcR5Gt/o+otTvUAmJ28FCLab9lnlpEhxRCMQpRA=
github.com/sagernet/cronet-go/all v0.0.0-20260227112944-17c7ef18afa6/go.mod h1:xVwYoNCyv9tF7W1RJlUdDbT4bn5tyqtyTe1P1ZY2VP8=
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20260227112350-bf468eec914d h1:tudlBYdQHIWctKIdf7pceBOFIUIISK6yFivwsxhxDk0=
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20260227112350-bf468eec914d/go.mod h1:XXDwdjX/T8xftoeJxQmbBoYXZp8MAPFR2CwbFuTpEtw=
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20260227112350-bf468eec914d h1:F5EsQlIknj0HlExBFR4EXW69dYj0MpK1HCpKhL/weEs=
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:iNiUGoLtnr8/JTuVNj7XJbmpOAp2C6+B81KDrPxwaZM=
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20260227112350-bf468eec914d h1:9SQ6I2Y2radd6RyWEfV+9s1Q9Kth54B6gBHuJWNzQas=
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20260227112350-bf468eec914d/go.mod h1:19ILNUOGIzRdOqa2mq+iY0JoHxuieB7/lnjYeaA2vEc=
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20260227112350-bf468eec914d h1:+XoeknBi6+s6epDAS3BkEsp5zGqEJsT9L8JEcaq+0nE=
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:JxzGyQf94Cr6sBShKqODGDyRUlESfJK/Njcz9Lz6qMQ=
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20260227112350-bf468eec914d h1:poqfhHJAg+7BtABn4cue7V4y8Kb2eZ1Cy0j+bhDangw=
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:KN+9T9TBycGOLzmKU4QdcHAJEj6Nlx48ifnlTvvHMvs=
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20260227112350-bf468eec914d h1:nH6rtfqWbui9zQPjd18cpvZncGvn21UcVLtmeUoQKXs=
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:kojvtUc29KKnk8hs2QIANynVR59921SnGWA9kXohHc0=
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20260227112350-bf468eec914d h1:HtnjWZzSQBaP29XJ5NoIps1TVZ7DUC7R0NH7IyhJ5Ag=
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20260227112350-bf468eec914d/go.mod h1:hkQzRE5GDbaH1/ioqYh0Taho4L6i0yLRCVEZ5xHz5M0=
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20260227112350-bf468eec914d h1:E2DWx0Agrj8Fi745S+otYW+W0rL2I8+Z2rZCFqGYPvQ=
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:tzVJFTOm66UxLxy6K0ZN5Ic2PC79e+sKKnt+V9puEa4=
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20260227112350-bf468eec914d h1:j7f/rBwPlO1RpFQeM35QVHymVXGVo6d8WTz4i4SjcPo=
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20260227112350-bf468eec914d/go.mod h1:M/pN6m3j0HFU6/y83n0HU6GLYys3tYdr/xTE8hVEGMo=
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20260227112350-bf468eec914d h1:hz8kkcHGMe7QBTpbqkaw89ZFsfX+UN5F5RIDrroDkx8=
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20260227112350-bf468eec914d/go.mod h1:cGh5hO6eljCo6KMQ/Cel8Xgq4+etL0awZLRBDVG1EZQ=
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20260227112350-bf468eec914d h1:TNFaO19ySEyqG79j5+dYb+w4ivusrTXanWuogmC4VM0=
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:JFE0/cxaKkx0wqPMZU7MgaplQlU0zudv82dROJjClKU=
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20260227112350-bf468eec914d h1:Ewc/wR3yu/hOwG/p49nI9TwYmYv3Llm5DA6fSb1w8hY=
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:vU8VftFeSt7fURCa3JXD6+k6ss1YAX+idQjPvHmJ2tI=
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20260227112350-bf468eec914d h1:PJ24NkPNpMrLGNRdb6moEqJo8gfhYcIRZmQD8jPPCJk=
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:vCe4OUuL+XOUge9v3MyTD45BnuAXiH+DkjN9quDXJzQ=
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20260227112350-bf468eec914d h1:IaUghNA8cOmmwvzUPKPsfhiG0KmpWpE0mFZl85T5/Bw=
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20260227112350-bf468eec914d/go.mod h1:w9amBWrvjtohQzBGCKJ7LCh22LhTIJs4sE7cYaKQzM0=
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20260227112350-bf468eec914d h1:whbeDcr9dDWPr45Is9QV6OHAncrBWLJtPuo4uyEJFBg=
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:TqlsFtcYS/etTeck46kHBeT8Le0Igw1Q/AV88UnMS3s=
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20260227112350-bf468eec914d h1:ecHgaGMvikNYjsfULekdXjL/cQJXCS38yvHaKVMWtXc=
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:B6Qd0vys8sv9OKVRN6J9RqDzYRGE938Fb2zrYdBDyTQ=
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20260227112350-bf468eec914d h1:no7Cb54+vv1bQ39zFp+JIHKO8Tu3sTwqz8SoOAuV/Ek=
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:3tXMMFY7AHugOVBZ5Al7cL7JKsnFOe5bMVr0hZPk3ow=
github.com/sagernet/cronet-go/lib/linux_loong64 v0.0.0-20260227112350-bf468eec914d h1:DqBSbam9KAzBgDInOoNy4K0baSJyxGWESxrDewU5aSs=
github.com/sagernet/cronet-go/lib/linux_loong64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:Wt5uFdU3tnmm8YzobYewwdF7Mt6SucRQg6xeTNWC3Tk=
github.com/sagernet/cronet-go/lib/linux_loong64_musl v0.0.0-20260227112350-bf468eec914d h1:fOR5i+hRyjG8ZzPSG6URkoTKr5qYOJfxZ58zd8HBteM=
github.com/sagernet/cronet-go/lib/linux_loong64_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:lyIF6wKBLwWa5ZXaAKbAoewewl+yCHo2iYev39Mbj4E=
github.com/sagernet/cronet-go/lib/linux_mips64le v0.0.0-20260227112350-bf468eec914d h1:hEQGQI+PfUzYBVas4NWw8WiEUsATco6vwv+t4qTtgtw=
github.com/sagernet/cronet-go/lib/linux_mips64le v0.0.0-20260227112350-bf468eec914d/go.mod h1:H46PnSTTZNcZokLLiDeMDaHiS1l14PH3tzWi0eykjD8=
github.com/sagernet/cronet-go/lib/linux_mipsle v0.0.0-20260227112350-bf468eec914d h1:AzzJ5AtZlwTbU5QOSixZdPLTjzWKCun3AobQChKy0W8=
github.com/sagernet/cronet-go/lib/linux_mipsle v0.0.0-20260227112350-bf468eec914d/go.mod h1:RBhSUDAKWq7fswtV4nQUQhuaTLcX3ettR7teA7/yf2w=
github.com/sagernet/cronet-go/lib/linux_mipsle_musl v0.0.0-20260227112350-bf468eec914d h1:9Tp7s/WX4DZLx4ues8G38G2OV7eQbeuU2COEZEbGcF0=
github.com/sagernet/cronet-go/lib/linux_mipsle_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:wRzoIOGG4xbpp3Gh3triLKwMwYriScXzFtunLYhY4w0=
github.com/sagernet/cronet-go/lib/linux_riscv64 v0.0.0-20260227112350-bf468eec914d h1:T9EVZKTyZHOamwevomUZnJ6TQNc09I/BwK+L5HJCJj8=
github.com/sagernet/cronet-go/lib/linux_riscv64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:LNiZXmWil1OPwKCheqQjtakZlJuKGFz+iv2eGF76Hhs=
github.com/sagernet/cronet-go/lib/linux_riscv64_musl v0.0.0-20260227112350-bf468eec914d h1:FZmThI7xScJRPERFiA4L2l9KCwA0oi8/lEOajIKEtUQ=
github.com/sagernet/cronet-go/lib/linux_riscv64_musl v0.0.0-20260227112350-bf468eec914d/go.mod h1:YFDGKTkpkJGc5+hnX/RYosZyTWg9h+68VB55fYRRLYc=
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20260227112350-bf468eec914d h1:BCC/b8bL0dD9Q4ghgKABV/EsMe0J8GE/l7hcRdPkUXQ=
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20260227112350-bf468eec914d/go.mod h1:aaX0YGl8nhGmfRWI8bc3BtDjY8Vzx6O0cS/e1uqxDq4=
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20260227112350-bf468eec914d h1:3l463BXnC/X42ow2zqHm9Y/K4GM6aRsKUIZBcFxr2+Q=
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:EdzMKA96xITc42QEI+ct4SwqX8Dn3ltKK8wzdkLWpSc=
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20260227112350-bf468eec914d h1:+XHEZ/z5NgPfjOAzOwfbQzR+42qaDNB0nv+fAOcd6Pc=
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20260227112350-bf468eec914d/go.mod h1:qix4kv1TTAJ5tY4lJ9vjhe9EY4mM+B7H5giOhbxDVcc=
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20260227112350-bf468eec914d h1:sYWbP+qCt9Rhb1yGaIRY7HVLtaQZmrHWR0obc5+Q1qc=
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:lm9w/oCCRyBiUa3G8lDQTT8x/ONUvgVR2iV9fVzUZB8=
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20260227112350-bf468eec914d h1:r6eOVlAfmcUMD5nfz+mPd/aORevUKhcvxA1z1GdPnG8=
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20260227112350-bf468eec914d/go.mod h1:n34YyLgapgjWdKa0IoeczjAFCwD3/dxbsH5sucKw0bw=
github.com/sagernet/cronet-go v0.0.0-20251231120443-1d2d7341cbd8 h1:yQtAGNBF8c2cYdnsfkbOp8ShdWlpncua96KXqHd9svs=
github.com/sagernet/cronet-go v0.0.0-20251231120443-1d2d7341cbd8/go.mod h1:hwFHBEjjthyEquDULbr4c4ucMedp8Drb6Jvm2kt/0Bw=
github.com/sagernet/cronet-go/all v0.0.0-20251231120443-1d2d7341cbd8 h1:KMmO1p8bO2BTq7NdTNdmjurh7rg6S1vRfHrEvFPw8bY=
github.com/sagernet/cronet-go/all v0.0.0-20251231120443-1d2d7341cbd8/go.mod h1:A4cCrezhvuNnb2K6UVuM+IQDZXBjTi2iK5K/wBu7Olg=
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20251231115934-b87a9ae9bd80 h1:9dBJpiOdU121TZ+mOb3LW8T5tZ3ZBjSUB9zg2F9Cltg=
github.com/sagernet/cronet-go/lib/android_386 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:XXDwdjX/T8xftoeJxQmbBoYXZp8MAPFR2CwbFuTpEtw=
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20251231115934-b87a9ae9bd80 h1:vODDxjRh7CHHnfoapv1+pDFtEMPOFxWnBlVSskXCOSU=
github.com/sagernet/cronet-go/lib/android_amd64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:iNiUGoLtnr8/JTuVNj7XJbmpOAp2C6+B81KDrPxwaZM=
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20251231115934-b87a9ae9bd80 h1:ivqSFl9JHV63ELQkSTzeUkltLb+owOZ9GoRBTDYC0Jk=
github.com/sagernet/cronet-go/lib/android_arm v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:19ILNUOGIzRdOqa2mq+iY0JoHxuieB7/lnjYeaA2vEc=
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:UKvOG4VuR+8c4VFA11d1dXZfANXNjDecXeZJfmjHTY4=
github.com/sagernet/cronet-go/lib/android_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:JxzGyQf94Cr6sBShKqODGDyRUlESfJK/Njcz9Lz6qMQ=
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20251231115934-b87a9ae9bd80 h1:qtw9lKXyuDG8kBB3PyhD/xidVhffST7caUEQ2fS6Sbo=
github.com/sagernet/cronet-go/lib/darwin_amd64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:KN+9T9TBycGOLzmKU4QdcHAJEj6Nlx48ifnlTvvHMvs=
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:Vkx2x+f67sOoL5K4YRX6RfIfM8z14YeGqj6Xc/CnzWg=
github.com/sagernet/cronet-go/lib/darwin_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:kojvtUc29KKnk8hs2QIANynVR59921SnGWA9kXohHc0=
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80 h1:+jfTH4yHr+toXrUH2xGUnhql641IIvbBTCKpz9WvTxU=
github.com/sagernet/cronet-go/lib/ios_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:hkQzRE5GDbaH1/ioqYh0Taho4L6i0yLRCVEZ5xHz5M0=
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:Ow8A3f3YAgBJ4HMYJVv8RK0PsvQhAt4GHTBsiuaXJYw=
github.com/sagernet/cronet-go/lib/ios_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:tzVJFTOm66UxLxy6K0ZN5Ic2PC79e+sKKnt+V9puEa4=
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80 h1:HvlLfPq+WxRQtilZD4L3WgMYwZnwGnFoQX29eZtdm1Y=
github.com/sagernet/cronet-go/lib/ios_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:M/pN6m3j0HFU6/y83n0HU6GLYys3tYdr/xTE8hVEGMo=
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20251231115934-b87a9ae9bd80 h1:nGPM9VpMWhfPDX4SOk7p+GvHYAAv2LOCE4hNIue+eAY=
github.com/sagernet/cronet-go/lib/linux_386 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:cGh5hO6eljCo6KMQ/Cel8Xgq4+etL0awZLRBDVG1EZQ=
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20251231115934-b87a9ae9bd80 h1:GvFk6EH/PmIqxJN2OQarXt6vNrS/tgQ78b/WPvXhAmg=
github.com/sagernet/cronet-go/lib/linux_386_musl v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:JFE0/cxaKkx0wqPMZU7MgaplQlU0zudv82dROJjClKU=
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20251231115934-b87a9ae9bd80 h1:SaIqJcgT0D3fK4zdXRA3E9kzq4mtfwkNDzAnv6bFQjE=
github.com/sagernet/cronet-go/lib/linux_amd64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:vU8VftFeSt7fURCa3JXD6+k6ss1YAX+idQjPvHmJ2tI=
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20251231115934-b87a9ae9bd80 h1:pEnLOmZjgWk9jaezJt/RvMX91Mugq4KwyxdW2mYvrUU=
github.com/sagernet/cronet-go/lib/linux_amd64_musl v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:vCe4OUuL+XOUge9v3MyTD45BnuAXiH+DkjN9quDXJzQ=
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20251231115934-b87a9ae9bd80 h1:/HPZI7eocQxu7Ho5gBoOWEQwH+IgmnD0pG7RVQzeZK8=
github.com/sagernet/cronet-go/lib/linux_arm v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:w9amBWrvjtohQzBGCKJ7LCh22LhTIJs4sE7cYaKQzM0=
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:ZxVgKaU1r748EPp2y6GNZhMxtxxbC9xHSmWnJP4qDdM=
github.com/sagernet/cronet-go/lib/linux_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:TqlsFtcYS/etTeck46kHBeT8Le0Igw1Q/AV88UnMS3s=
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20251231115934-b87a9ae9bd80 h1:v8cjiAvXjWOkC4Li05MQrQIJ1NH2vK2OWMc/mS/c7FI=
github.com/sagernet/cronet-go/lib/linux_arm64_musl v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:B6Qd0vys8sv9OKVRN6J9RqDzYRGE938Fb2zrYdBDyTQ=
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20251231115934-b87a9ae9bd80 h1:Hh3at5gCCc2YZNbErv/jMqO4FhYN3oms892bLwK0aHI=
github.com/sagernet/cronet-go/lib/linux_arm_musl v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:3tXMMFY7AHugOVBZ5Al7cL7JKsnFOe5bMVr0hZPk3ow=
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80 h1:Mz9JbTXKCfIvnvDxzID7YtkC/5Y/6MG1BWNBmOoTkaQ=
github.com/sagernet/cronet-go/lib/tvos_amd64_simulator v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:aaX0YGl8nhGmfRWI8bc3BtDjY8Vzx6O0cS/e1uqxDq4=
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:xM0Z8k3WheuWfJsZNKEBkRyAFK/NEB01UNxElMf8oy8=
github.com/sagernet/cronet-go/lib/tvos_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:EdzMKA96xITc42QEI+ct4SwqX8Dn3ltKK8wzdkLWpSc=
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80 h1:Nvvip5xwIw9GoTg/Tch0uYW11wdc+VhZHTp81LuVZGY=
github.com/sagernet/cronet-go/lib/tvos_arm64_simulator v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:qix4kv1TTAJ5tY4lJ9vjhe9EY4mM+B7H5giOhbxDVcc=
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20251231115934-b87a9ae9bd80 h1:BkrIlIUtKHtSXSZe2xGjgEtbP0z1LYo0PXeM25obSSM=
github.com/sagernet/cronet-go/lib/windows_amd64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:lm9w/oCCRyBiUa3G8lDQTT8x/ONUvgVR2iV9fVzUZB8=
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20251231115934-b87a9ae9bd80 h1:oJY5DO13q1x4XORhZYR7QVmYdFFhbua5w7Hp74h6Z10=
github.com/sagernet/cronet-go/lib/windows_arm64 v0.0.0-20251231115934-b87a9ae9bd80/go.mod h1:n34YyLgapgjWdKa0IoeczjAFCwD3/dxbsH5sucKw0bw=
github.com/sagernet/fswatch v0.1.1 h1:YqID+93B7VRfqIH3PArW/XpJv5H4OLEVWDfProGoRQs=
github.com/sagernet/fswatch v0.1.1/go.mod h1:nz85laH0mkQqJfaOrqPpkwtU1znMFNVTpT/5oRsVz/o=
github.com/sagernet/gomobile v0.1.12 h1:XwzjZaclFF96deLqwAgK8gU3w0M2A8qxgDmhV+A0wjg=
github.com/sagernet/gomobile v0.1.12/go.mod h1:A8l3FlHi2D/+mfcd4HHvk5DGFPW/ShFb9jHP5VmSiDY=
github.com/sagernet/gomobile v0.1.11 h1:niMQAspvuThup5eRZQpsGcbM76zAvnsGr7RUIpnQMDQ=
github.com/sagernet/gomobile v0.1.11/go.mod h1:A8l3FlHi2D/+mfcd4HHvk5DGFPW/ShFb9jHP5VmSiDY=
github.com/sagernet/gvisor v0.0.0-20250811.0-sing-box-mod.1 h1:AzCE2RhBjLJ4WIWc/GejpNh+z30d5H1hwaB0nD9eY3o=
github.com/sagernet/gvisor v0.0.0-20250811.0-sing-box-mod.1/go.mod h1:NJKBtm9nVEK3iyOYWsUlrDQuoGh4zJ4KOPhSYVidvQ4=
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a h1:ObwtHN2VpqE0ZNjr6sGeT00J8uU7JF4cNUdb44/Duis=
github.com/sagernet/netlink v0.0.0-20240612041022-b9a21c07ac6a/go.mod h1:xLnfdiJbSp8rNqYEdIW/6eDO4mVoogml14Bh2hSiFpM=
github.com/sagernet/nftables v0.3.0-beta.4 h1:kbULlAwAC3jvdGAC1P5Fa3GSxVwQJibNenDW2zaXr8I=
github.com/sagernet/nftables v0.3.0-beta.4/go.mod h1:OQXAjvjNGGFxaTgVCSTRIhYB5/llyVDeapVoENYBDS8=
github.com/sagernet/quic-go v0.59.0-sing-box-mod.4 h1:6qvrUW79S+CrPwWz6cMePXohgjHoKxLo3c+MDhNwc3o=
github.com/sagernet/quic-go v0.59.0-sing-box-mod.4/go.mod h1:OqILvS182CyOol5zNNo6bguvOGgXzV459+chpRaUC+4=
github.com/sagernet/sing v0.8.0-beta.16.0.20260227013657-e419e9875a07 h1:LQqb+xtR5uqF6bePmJQ3sAToF/kMCjxSnz17HnboXA8=
github.com/sagernet/sing v0.8.0-beta.16.0.20260227013657-e419e9875a07/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing-mux v0.3.4 h1:ZQplKl8MNXutjzbMVtWvWG31fohhgOfCuUZR4dVQ8+s=
github.com/sagernet/sing-mux v0.3.4/go.mod h1:QvlKMyNBNrQoyX4x+gq028uPbLM2XeRpWtDsWBJbFSk=
github.com/sagernet/sing-quic v0.6.0-beta.13 h1:umDr6GC5fVbOIoTvqV4544wY61zEN+ObQwVGNP8sX1M=
github.com/sagernet/sing-quic v0.6.0-beta.13/go.mod h1:K5bWvITOm4vE10fwLfrWpw27bCoVJ+tfQ79tOWg+Ko8=
github.com/sagernet/quic-go v0.58.0-sing-box-mod.1 h1:E9yZrU0ZxSiW5RrGUnFZeI02EIMdAAv0RxdoxXCqZyk=
github.com/sagernet/quic-go v0.58.0-sing-box-mod.1/go.mod h1:OqILvS182CyOol5zNNo6bguvOGgXzV459+chpRaUC+4=
github.com/sagernet/sing v0.6.9/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing v0.8.0-beta.9.0.20260109141034-50e1ed36c6f6 h1:TgmW2IKThK+3wWhoB/UKjwdW2NUyWqCyXZoCYTMizv8=
github.com/sagernet/sing v0.8.0-beta.9.0.20260109141034-50e1ed36c6f6/go.mod h1:ARkL0gM13/Iv5VCZmci/NuoOlePoIsW0m7BWfln/Hak=
github.com/sagernet/sing-mux v0.3.3 h1:YFgt9plMWzH994BMZLmyKL37PdIVaIilwP0Jg+EcLfw=
github.com/sagernet/sing-mux v0.3.3/go.mod h1:pht8iFY4c9Xltj7rhVd208npkNaeCxzyXCgulDPLUDA=
github.com/sagernet/sing-quic v0.6.0-beta.8 h1:Y0P8WTqWpfg80rLFsDfF22QumM+HEAjRQ2o+8Dv+vDs=
github.com/sagernet/sing-quic v0.6.0-beta.8/go.mod h1:Y3YVjPutLHLQvYjGtUFH+w8YmM4MTd19NDzxLZGNGIs=
github.com/sagernet/sing-shadowsocks v0.2.8 h1:PURj5PRoAkqeHh2ZW205RWzN9E9RtKCVCzByXruQWfE=
github.com/sagernet/sing-shadowsocks v0.2.8/go.mod h1:lo7TWEMDcN5/h5B8S0ew+r78ZODn6SwVaFhvB6H+PTI=
github.com/sagernet/sing-shadowsocks2 v0.2.1 h1:dWV9OXCeFPuYGHb6IRqlSptVnSzOelnqqs2gQ2/Qioo=
github.com/sagernet/sing-shadowsocks2 v0.2.1/go.mod h1:RnXS0lExcDAovvDeniJ4IKa2IuChrdipolPYWBv9hWQ=
github.com/sagernet/sing-shadowtls v0.2.1-0.20250503051639-fcd445d33c11 h1:tK+75l64tm9WvEFrYRE1t0YxoFdWQqw/h7Uhzj0vJ+w=
github.com/sagernet/sing-shadowtls v0.2.1-0.20250503051639-fcd445d33c11/go.mod h1:sWqKnGlMipCHaGsw1sTTlimyUpgzP4WP3pjhCsYt9oA=
github.com/sagernet/sing-tun v0.8.0-beta.18 h1:C6oHxP9BNBVEVdC9ABMTXmKej9mUVtcuw2v+IiBS8yw=
github.com/sagernet/sing-tun v0.8.0-beta.18/go.mod h1:+HAK/y9GZljdT0KYKMYDR8MjjqnqDDQZYp5ZZQoRzS8=
github.com/sagernet/sing-tun v0.8.0-beta.11.0.20260107060547-525f783d005b h1:MqPEFejgxqechqBn1OkL+9JPW0W4AiGCI0Y1JhglIlQ=
github.com/sagernet/sing-tun v0.8.0-beta.11.0.20260107060547-525f783d005b/go.mod h1:+HAK/y9GZljdT0KYKMYDR8MjjqnqDDQZYp5ZZQoRzS8=
github.com/sagernet/sing-vmess v0.2.8-0.20250909125414-3aed155119a1 h1:aSwUNYUkVyVvdmBSufR8/nRFonwJeKSIROxHcm5br9o=
github.com/sagernet/sing-vmess v0.2.8-0.20250909125414-3aed155119a1/go.mod h1:P11scgTxMxVVQ8dlM27yNm3Cro40mD0+gHbnqrNGDuY=
github.com/sagernet/smux v1.5.50-sing-box-mod.1 h1:XkJcivBC9V4wBjiGXIXZ229aZCU1hzcbp6kSkkyQ478=
github.com/sagernet/smux v1.5.50-sing-box-mod.1/go.mod h1:NjhsCEWedJm7eFLyhuBgIEzwfhRmytrUoiLluxs5Sk8=
github.com/sagernet/smux v1.5.34-mod.2 h1:gkmBjIjlJ2zQKpLigOkFur5kBKdV6bNRoFu2WkltRQ4=
github.com/sagernet/smux v1.5.34-mod.2/go.mod h1:0KW0+R+ycvA2INW4gbsd7BNyg+HEfLIAxa5N02/28Zc=
github.com/sagernet/tailscale v1.92.4-sing-box-1.13-mod.6 h1:eYz/OpMqWCvO2++iw3dEuzrlfC2xv78GdlGvprIM6O8=
github.com/sagernet/tailscale v1.92.4-sing-box-1.13-mod.6/go.mod h1:m87GAn4UcesHQF3leaPFEINZETO5za1LGn1GJdNDgNc=
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20260224074747-506b7631853c h1:f9cXNB+IOOPnR8DOLMTpr42jf7naxh5Un5Y09BBf5Cg=
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20260224074747-506b7631853c/go.mod h1:WUxgxUDZoCF2sxVmW+STSxatP02Qn3FcafTiI2BLtE0=
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20250917110311-16510ac47288 h1:E2tZFeg9mGYGQ7E7BbxMv1cU35HxwgRm6tPKI2Pp7DA=
github.com/sagernet/wireguard-go v0.0.2-beta.1.0.20250917110311-16510ac47288/go.mod h1:WUxgxUDZoCF2sxVmW+STSxatP02Qn3FcafTiI2BLtE0=
github.com/sagernet/ws v0.0.0-20231204124109-acfe8907c854 h1:6uUiZcDRnZSAegryaUGwPC/Fj13JSHwiTftrXhMmYOc=
github.com/sagernet/ws v0.0.0-20231204124109-acfe8907c854/go.mod h1:LtfoSK3+NG57tvnVEHgcuBW9ujgE8enPSgzgwStwCAA=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
@@ -255,7 +239,14 @@ github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiT
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ=
@@ -346,8 +337,9 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
@@ -380,8 +372,6 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,10 +0,0 @@
package include
import (
"github.com/sagernet/sing-box/adapter/service"
"github.com/sagernet/sing-box/service/oomkiller"
)
func registerOOMKillerService(registry *service.Registry) {
oomkiller.RegisterService(registry)
}

View File

@@ -137,7 +137,6 @@ func ServiceRegistry() *service.Registry {
registerDERPService(registry)
registerCCMService(registry)
registerOCMService(registry)
registerOOMKillerService(registry)
return registry
}

View File

@@ -2,7 +2,6 @@ package option
import (
"github.com/sagernet/sing/common/auth"
"github.com/sagernet/sing/common/byteformats"
"github.com/sagernet/sing/common/json/badoption"
)
@@ -27,14 +26,12 @@ type NaiveInboundOptions struct {
type NaiveOutboundOptions struct {
DialerOptions
ServerOptions
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
InsecureConcurrency int `json:"insecure_concurrency,omitempty"`
ExtraHeaders badoption.HTTPHeader `json:"extra_headers,omitempty"`
ReceiveWindow *byteformats.MemoryBytes `json:"stream_receive_window,omitempty"`
UDPOverTCP *UDPOverTCPOptions `json:"udp_over_tcp,omitempty"`
QUIC bool `json:"quic,omitempty"`
QUICCongestionControl string `json:"quic_congestion_control,omitempty"`
QUICSessionReceiveWindow *byteformats.MemoryBytes `json:"quic_session_receive_window,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
InsecureConcurrency int `json:"insecure_concurrency,omitempty"`
ExtraHeaders badoption.HTTPHeader `json:"extra_headers,omitempty"`
UDPOverTCP *UDPOverTCPOptions `json:"udp_over_tcp,omitempty"`
QUIC bool `json:"quic,omitempty"`
QUICCongestionControl string `json:"quic_congestion_control,omitempty"`
OutboundTLSOptionsContainer
}

View File

@@ -1,14 +0,0 @@
package option
import (
"github.com/sagernet/sing/common/byteformats"
"github.com/sagernet/sing/common/json/badoption"
)
type OOMKillerServiceOptions struct {
MemoryLimit *byteformats.MemoryBytes `json:"memory_limit,omitempty"`
SafetyMargin *byteformats.MemoryBytes `json:"safety_margin,omitempty"`
MinInterval badoption.Duration `json:"min_interval,omitempty"`
MaxInterval badoption.Duration `json:"max_interval,omitempty"`
ChecksBeforeLimit int `json:"checks_before_limit,omitempty"`
}

View File

@@ -12,23 +12,22 @@ import (
type TailscaleEndpointOptions struct {
DialerOptions
StateDirectory string `json:"state_directory,omitempty"`
AuthKey string `json:"auth_key,omitempty"`
ControlURL string `json:"control_url,omitempty"`
Ephemeral bool `json:"ephemeral,omitempty"`
Hostname string `json:"hostname,omitempty"`
AcceptRoutes bool `json:"accept_routes,omitempty"`
ExitNode string `json:"exit_node,omitempty"`
ExitNodeAllowLANAccess bool `json:"exit_node_allow_lan_access,omitempty"`
AdvertiseRoutes []netip.Prefix `json:"advertise_routes,omitempty"`
AdvertiseExitNode bool `json:"advertise_exit_node,omitempty"`
AdvertiseTags badoption.Listable[string] `json:"advertise_tags,omitempty"`
RelayServerPort *uint16 `json:"relay_server_port,omitempty"`
RelayServerStaticEndpoints []netip.AddrPort `json:"relay_server_static_endpoints,omitempty"`
SystemInterface bool `json:"system_interface,omitempty"`
SystemInterfaceName string `json:"system_interface_name,omitempty"`
SystemInterfaceMTU uint32 `json:"system_interface_mtu,omitempty"`
UDPTimeout UDPTimeoutCompat `json:"udp_timeout,omitempty"`
StateDirectory string `json:"state_directory,omitempty"`
AuthKey string `json:"auth_key,omitempty"`
ControlURL string `json:"control_url,omitempty"`
Ephemeral bool `json:"ephemeral,omitempty"`
Hostname string `json:"hostname,omitempty"`
AcceptRoutes bool `json:"accept_routes,omitempty"`
ExitNode string `json:"exit_node,omitempty"`
ExitNodeAllowLANAccess bool `json:"exit_node_allow_lan_access,omitempty"`
AdvertiseRoutes []netip.Prefix `json:"advertise_routes,omitempty"`
AdvertiseExitNode bool `json:"advertise_exit_node,omitempty"`
RelayServerPort *uint16 `json:"relay_server_port,omitempty"`
RelayServerStaticEndpoints []netip.AddrPort `json:"relay_server_static_endpoints,omitempty"`
SystemInterface bool `json:"system_interface,omitempty"`
SystemInterfaceName string `json:"system_interface_name,omitempty"`
SystemInterfaceMTU uint32 `json:"system_interface_mtu,omitempty"`
UDPTimeout UDPTimeoutCompat `json:"udp_timeout,omitempty"`
}
type TailscaleDNSServerOptions struct {

View File

@@ -31,7 +31,6 @@ type _ACMEDNS01ChallengeOptions struct {
Provider string `json:"provider,omitempty"`
AliDNSOptions ACMEDNS01AliDNSOptions `json:"-"`
CloudflareOptions ACMEDNS01CloudflareOptions `json:"-"`
ACMEDNSOptions ACMEDNS01ACMEDNSOptions `json:"-"`
}
type ACMEDNS01ChallengeOptions _ACMEDNS01ChallengeOptions
@@ -43,8 +42,6 @@ func (o ACMEDNS01ChallengeOptions) MarshalJSON() ([]byte, error) {
v = o.AliDNSOptions
case C.DNSProviderCloudflare:
v = o.CloudflareOptions
case C.DNSProviderACMEDNS:
v = o.ACMEDNSOptions
case "":
return nil, E.New("missing provider type")
default:
@@ -64,8 +61,6 @@ func (o *ACMEDNS01ChallengeOptions) UnmarshalJSON(bytes []byte) error {
v = &o.AliDNSOptions
case C.DNSProviderCloudflare:
v = &o.CloudflareOptions
case C.DNSProviderACMEDNS:
v = &o.ACMEDNSOptions
default:
return E.New("unknown provider type: " + o.Provider)
}
@@ -87,10 +82,3 @@ type ACMEDNS01CloudflareOptions struct {
APIToken string `json:"api_token,omitempty"`
ZoneToken string `json:"zone_token,omitempty"`
}
type ACMEDNS01ACMEDNSOptions struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Subdomain string `json:"subdomain,omitempty"`
ServerURL string `json:"server_url,omitempty"`
}

View File

@@ -11,37 +11,36 @@ import (
)
type TunInboundOptions struct {
InterfaceName string `json:"interface_name,omitempty"`
MTU uint32 `json:"mtu,omitempty"`
Address badoption.Listable[netip.Prefix] `json:"address,omitempty"`
AutoRoute bool `json:"auto_route,omitempty"`
IPRoute2TableIndex int `json:"iproute2_table_index,omitempty"`
IPRoute2RuleIndex int `json:"iproute2_rule_index,omitempty"`
AutoRedirect bool `json:"auto_redirect,omitempty"`
AutoRedirectInputMark FwMark `json:"auto_redirect_input_mark,omitempty"`
AutoRedirectOutputMark FwMark `json:"auto_redirect_output_mark,omitempty"`
AutoRedirectResetMark FwMark `json:"auto_redirect_reset_mark,omitempty"`
AutoRedirectNFQueue uint16 `json:"auto_redirect_nfqueue,omitempty"`
AutoRedirectFallbackRuleIndex int `json:"auto_redirect_iproute2_fallback_rule_index,omitempty"`
ExcludeMPTCP bool `json:"exclude_mptcp,omitempty"`
LoopbackAddress badoption.Listable[netip.Addr] `json:"loopback_address,omitempty"`
StrictRoute bool `json:"strict_route,omitempty"`
RouteAddress badoption.Listable[netip.Prefix] `json:"route_address,omitempty"`
RouteAddressSet badoption.Listable[string] `json:"route_address_set,omitempty"`
RouteExcludeAddress badoption.Listable[netip.Prefix] `json:"route_exclude_address,omitempty"`
RouteExcludeAddressSet badoption.Listable[string] `json:"route_exclude_address_set,omitempty"`
IncludeInterface badoption.Listable[string] `json:"include_interface,omitempty"`
ExcludeInterface badoption.Listable[string] `json:"exclude_interface,omitempty"`
IncludeUID badoption.Listable[uint32] `json:"include_uid,omitempty"`
IncludeUIDRange badoption.Listable[string] `json:"include_uid_range,omitempty"`
ExcludeUID badoption.Listable[uint32] `json:"exclude_uid,omitempty"`
ExcludeUIDRange badoption.Listable[string] `json:"exclude_uid_range,omitempty"`
IncludeAndroidUser badoption.Listable[int] `json:"include_android_user,omitempty"`
IncludePackage badoption.Listable[string] `json:"include_package,omitempty"`
ExcludePackage badoption.Listable[string] `json:"exclude_package,omitempty"`
UDPTimeout UDPTimeoutCompat `json:"udp_timeout,omitempty"`
Stack string `json:"stack,omitempty"`
Platform *TunPlatformOptions `json:"platform,omitempty"`
InterfaceName string `json:"interface_name,omitempty"`
MTU uint32 `json:"mtu,omitempty"`
Address badoption.Listable[netip.Prefix] `json:"address,omitempty"`
AutoRoute bool `json:"auto_route,omitempty"`
IPRoute2TableIndex int `json:"iproute2_table_index,omitempty"`
IPRoute2RuleIndex int `json:"iproute2_rule_index,omitempty"`
AutoRedirect bool `json:"auto_redirect,omitempty"`
AutoRedirectInputMark FwMark `json:"auto_redirect_input_mark,omitempty"`
AutoRedirectOutputMark FwMark `json:"auto_redirect_output_mark,omitempty"`
AutoRedirectResetMark FwMark `json:"auto_redirect_reset_mark,omitempty"`
AutoRedirectNFQueue uint16 `json:"auto_redirect_nfqueue,omitempty"`
ExcludeMPTCP bool `json:"exclude_mptcp,omitempty"`
LoopbackAddress badoption.Listable[netip.Addr] `json:"loopback_address,omitempty"`
StrictRoute bool `json:"strict_route,omitempty"`
RouteAddress badoption.Listable[netip.Prefix] `json:"route_address,omitempty"`
RouteAddressSet badoption.Listable[string] `json:"route_address_set,omitempty"`
RouteExcludeAddress badoption.Listable[netip.Prefix] `json:"route_exclude_address,omitempty"`
RouteExcludeAddressSet badoption.Listable[string] `json:"route_exclude_address_set,omitempty"`
IncludeInterface badoption.Listable[string] `json:"include_interface,omitempty"`
ExcludeInterface badoption.Listable[string] `json:"exclude_interface,omitempty"`
IncludeUID badoption.Listable[uint32] `json:"include_uid,omitempty"`
IncludeUIDRange badoption.Listable[string] `json:"include_uid_range,omitempty"`
ExcludeUID badoption.Listable[uint32] `json:"exclude_uid,omitempty"`
ExcludeUIDRange badoption.Listable[string] `json:"exclude_uid_range,omitempty"`
IncludeAndroidUser badoption.Listable[int] `json:"include_android_user,omitempty"`
IncludePackage badoption.Listable[string] `json:"include_package,omitempty"`
ExcludePackage badoption.Listable[string] `json:"exclude_package,omitempty"`
UDPTimeout UDPTimeoutCompat `json:"udp_timeout,omitempty"`
Stack string `json:"stack,omitempty"`
Platform *TunPlatformOptions `json:"platform,omitempty"`
InboundOptions
// Deprecated: removed

View File

@@ -4,7 +4,6 @@ import (
std_bufio "bufio"
"context"
"net"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/adapter/inbound"
@@ -37,22 +36,14 @@ type Inbound struct {
listener *listener.Listener
authenticator *auth.Authenticator
tlsConfig tls.ServerConfig
udpTimeout time.Duration
}
func NewInbound(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.HTTPMixedInboundOptions) (adapter.Inbound, error) {
var udpTimeout time.Duration
if options.UDPTimeout != 0 {
udpTimeout = time.Duration(options.UDPTimeout)
} else {
udpTimeout = C.UDPTimeout
}
inbound := &Inbound{
Adapter: inbound.NewAdapter(C.TypeMixed, tag),
router: uot.NewRouter(router, logger),
logger: logger,
authenticator: auth.NewAuthenticator(options.Users),
udpTimeout: udpTimeout,
}
if options.TLS != nil {
tlsConfig, err := tls.NewServerWithOptions(tls.ServerOptions{
@@ -125,7 +116,7 @@ func (h *Inbound) newConnection(ctx context.Context, conn net.Conn, metadata ada
}
switch headerBytes[0] {
case socks4.Version, socks5.Version:
return socks.HandleConnectionEx(ctx, conn, reader, h.authenticator, adapter.NewUpstreamHandlerEx(metadata, h.newUserConnection, h.streamUserPacketConnection), h.listener, h.udpTimeout, metadata.Source, onClose)
return socks.HandleConnectionEx(ctx, conn, reader, h.authenticator, adapter.NewUpstreamHandlerEx(metadata, h.newUserConnection, h.streamUserPacketConnection), h.listener, metadata.Source, onClose)
default:
return http.HandleConnectionEx(ctx, conn, reader, h.authenticator, adapter.NewUpstreamHandlerEx(metadata, h.newUserConnection, h.streamUserPacketConnection), metadata.Source, onClose)
}

View File

@@ -95,7 +95,6 @@ func (p *paddingConn) writeWithPadding(writer io.Writer, data []byte) (n int, er
binary.BigEndian.PutUint16(header, uint16(len(data)))
header[2] = byte(paddingSize)
common.Must1(buffer.Write(data))
buffer.Extend(paddingSize)
_, err = writer.Write(buffer.Bytes())
if err == nil {
n = len(data)

View File

@@ -127,7 +127,7 @@ func NewOutbound(ctx context.Context, router adapter.Router, logger log.ContextL
var dnsResolver cronet.DNSResolverFunc
if dnsRouter != nil {
dnsResolver = func(dnsContext context.Context, request *mDNS.Msg) *mDNS.Msg {
response, err := dnsRouter.Exchange(dnsContext, request, outboundDialer.(dialer.ResolveDialer).QueryOptions())
response, err := dnsRouter.Exchange(dnsContext, request, adapter.DNSQueryOptions{})
if err != nil {
logger.Error("DNS exchange failed: ", err)
return dns.FixedResponseStatus(request, mDNS.RcodeServerFailure)
@@ -177,7 +177,6 @@ func NewOutbound(ctx context.Context, router adapter.Router, logger log.ContextL
}
client, err := cronet.NewNaiveClient(cronet.NaiveClientOptions{
Context: ctx,
Logger: logger,
ServerAddress: serverAddress,
ServerName: serverName,
Username: options.Username,
@@ -235,7 +234,7 @@ func (h *Outbound) DialContext(ctx context.Context, network string, destination
switch N.NetworkName(network) {
case N.NetworkTCP:
h.logger.InfoContext(ctx, "outbound connection to ", destination)
return h.client.DialEarly(ctx, destination)
return h.client.DialEarly(destination)
case N.NetworkUDP:
if h.uotClient == nil {
return nil, E.New("UDP is not supported unless UDP over TCP is enabled")
@@ -254,10 +253,6 @@ func (h *Outbound) ListenPacket(ctx context.Context, destination M.Socksaddr) (n
return h.uotClient.ListenPacket(ctx, destination)
}
func (h *Outbound) InterfaceUpdated() {
h.client.Engine().CloseAllConnections()
}
func (h *Outbound) Close() error {
return h.client.Close()
}
@@ -271,5 +266,5 @@ type naiveDialer struct {
}
func (d *naiveDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
return d.NaiveClient.DialEarly(ctx, destination)
return d.NaiveClient.DialEarly(destination)
}

View File

@@ -4,7 +4,6 @@ import (
std_bufio "bufio"
"context"
"net"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/adapter/inbound"
@@ -32,22 +31,14 @@ type Inbound struct {
logger logger.ContextLogger
listener *listener.Listener
authenticator *auth.Authenticator
udpTimeout time.Duration
}
func NewInbound(ctx context.Context, router adapter.Router, logger log.ContextLogger, tag string, options option.SocksInboundOptions) (adapter.Inbound, error) {
var udpTimeout time.Duration
if options.UDPTimeout != 0 {
udpTimeout = time.Duration(options.UDPTimeout)
} else {
udpTimeout = C.UDPTimeout
}
inbound := &Inbound{
Adapter: inbound.NewAdapter(C.TypeSOCKS, tag),
router: uot.NewRouter(router, logger),
logger: logger,
authenticator: auth.NewAuthenticator(options.Users),
udpTimeout: udpTimeout,
}
inbound.listener = listener.New(listener.Options{
Context: ctx,
@@ -71,7 +62,7 @@ func (h *Inbound) Close() error {
}
func (h *Inbound) NewConnectionEx(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) {
err := socks.HandleConnectionEx(ctx, conn, std_bufio.NewReader(conn), h.authenticator, adapter.NewUpstreamHandlerEx(metadata, h.newUserConnection, h.streamUserPacketConnection), h.listener, h.udpTimeout, metadata.Source, onClose)
err := socks.HandleConnectionEx(ctx, conn, std_bufio.NewReader(conn), h.authenticator, adapter.NewUpstreamHandlerEx(metadata, h.newUserConnection, h.streamUserPacketConnection), h.listener, metadata.Source, onClose)
N.CloseOnHandshakeFailure(conn, onClose, err)
if err != nil {
if E.IsClosedOrCanceled(err) {

View File

@@ -97,7 +97,6 @@ type Endpoint struct {
exitNodeAllowLANAccess bool
advertiseRoutes []netip.Prefix
advertiseExitNode bool
advertiseTags []string
relayServerPort *uint16
relayServerStaticEndpoints []netip.AddrPort
@@ -210,11 +209,10 @@ func NewEndpoint(ctx context.Context, router adapter.Router, logger log.ContextL
UserLogf: func(format string, args ...any) {
logger.Debug(fmt.Sprintf(format, args...))
},
Ephemeral: options.Ephemeral,
AuthKey: options.AuthKey,
ControlURL: options.ControlURL,
AdvertiseTags: options.AdvertiseTags,
Dialer: &endpointDialer{Dialer: outboundDialer, logger: logger},
Ephemeral: options.Ephemeral,
AuthKey: options.AuthKey,
ControlURL: options.ControlURL,
Dialer: &endpointDialer{Dialer: outboundDialer, logger: logger},
LookupHook: func(ctx context.Context, host string) ([]netip.Addr, error) {
return dnsRouter.Lookup(ctx, host, outboundDialer.(dialer.ResolveDialer).QueryOptions())
},
@@ -246,7 +244,6 @@ func NewEndpoint(ctx context.Context, router adapter.Router, logger log.ContextL
exitNodeAllowLANAccess: options.ExitNodeAllowLANAccess,
advertiseRoutes: options.AdvertiseRoutes,
advertiseExitNode: options.AdvertiseExitNode,
advertiseTags: options.AdvertiseTags,
relayServerPort: options.RelayServerPort,
relayServerStaticEndpoints: options.RelayServerStaticEndpoints,
udpTimeout: udpTimeout,
@@ -362,23 +359,25 @@ func (t *Endpoint) Start(stage adapter.StartStage) error {
localBackend := t.server.ExportLocalBackend()
perfs := &ipn.MaskedPrefs{
Prefs: ipn.Prefs{
RouteAll: t.acceptRoutes,
AdvertiseRoutes: t.advertiseRoutes,
RouteAll: t.acceptRoutes,
},
RouteAllSet: true,
ExitNodeIPSet: true,
AdvertiseRoutesSet: true,
RelayServerPortSet: true,
RelayServerStaticEndpointsSet: true,
RouteAllSet: true,
ExitNodeIPSet: true,
AdvertiseRoutesSet: true,
}
if len(t.advertiseRoutes) > 0 {
perfs.AdvertiseRoutes = t.advertiseRoutes
}
if t.advertiseExitNode {
perfs.AdvertiseRoutes = append(perfs.AdvertiseRoutes, tsaddr.ExitRoutes()...)
}
if t.relayServerPort != nil {
perfs.RelayServerPort = t.relayServerPort
perfs.RelayServerPortSet = true
}
if len(t.relayServerStaticEndpoints) > 0 {
perfs.RelayServerStaticEndpoints = t.relayServerStaticEndpoints
perfs.RelayServerStaticEndpointsSet = true
}
_, err = localBackend.EditPrefs(perfs)
if err != nil {

View File

@@ -99,7 +99,7 @@ func (l *ProxyListener) acceptLoop() {
}
func (l *ProxyListener) accept(ctx context.Context, conn *net.TCPConn) error {
return socks.HandleConnectionEx(ctx, conn, std_bufio.NewReader(conn), l.authenticator, l, nil, 0, M.SocksaddrFromNet(conn.RemoteAddr()), nil)
return socks.HandleConnectionEx(ctx, conn, std_bufio.NewReader(conn), l.authenticator, l, nil, M.SocksaddrFromNet(conn.RemoteAddr()), nil)
}
func (l *ProxyListener) NewConnectionEx(ctx context.Context, conn net.Conn, source M.Socksaddr, destination M.Socksaddr, onClose N.CloseHandlerFunc) {

View File

@@ -174,10 +174,6 @@ func NewInbound(ctx context.Context, router adapter.Router, logger log.ContextLo
if ruleIndex == 0 {
ruleIndex = tun.DefaultIPRoute2RuleIndex
}
autoRedirectFallbackRuleIndex := options.AutoRedirectFallbackRuleIndex
if autoRedirectFallbackRuleIndex == 0 {
autoRedirectFallbackRuleIndex = tun.DefaultIPRoute2AutoRedirectFallbackRuleIndex
}
inputMark := uint32(options.AutoRedirectInputMark)
if inputMark == 0 {
inputMark = tun.DefaultAutoRedirectInputMark
@@ -204,36 +200,35 @@ func NewInbound(ctx context.Context, router adapter.Router, logger log.ContextLo
logger: logger,
inboundOptions: options.InboundOptions,
tunOptions: tun.Options{
Name: options.InterfaceName,
MTU: tunMTU,
GSO: enableGSO,
Inet4Address: inet4Address,
Inet6Address: inet6Address,
AutoRoute: options.AutoRoute,
IPRoute2TableIndex: tableIndex,
IPRoute2RuleIndex: ruleIndex,
IPRoute2AutoRedirectFallbackRuleIndex: autoRedirectFallbackRuleIndex,
AutoRedirectInputMark: inputMark,
AutoRedirectOutputMark: outputMark,
AutoRedirectResetMark: resetMark,
AutoRedirectNFQueue: nfQueue,
ExcludeMPTCP: options.ExcludeMPTCP,
Inet4LoopbackAddress: common.Filter(options.LoopbackAddress, netip.Addr.Is4),
Inet6LoopbackAddress: common.Filter(options.LoopbackAddress, netip.Addr.Is6),
StrictRoute: options.StrictRoute,
IncludeInterface: options.IncludeInterface,
ExcludeInterface: options.ExcludeInterface,
Inet4RouteAddress: inet4RouteAddress,
Inet6RouteAddress: inet6RouteAddress,
Inet4RouteExcludeAddress: inet4RouteExcludeAddress,
Inet6RouteExcludeAddress: inet6RouteExcludeAddress,
IncludeUID: includeUID,
ExcludeUID: excludeUID,
IncludeAndroidUser: options.IncludeAndroidUser,
IncludePackage: options.IncludePackage,
ExcludePackage: options.ExcludePackage,
InterfaceMonitor: networkManager.InterfaceMonitor(),
EXP_MultiPendingPackets: multiPendingPackets,
Name: options.InterfaceName,
MTU: tunMTU,
GSO: enableGSO,
Inet4Address: inet4Address,
Inet6Address: inet6Address,
AutoRoute: options.AutoRoute,
IPRoute2TableIndex: tableIndex,
IPRoute2RuleIndex: ruleIndex,
AutoRedirectInputMark: inputMark,
AutoRedirectOutputMark: outputMark,
AutoRedirectResetMark: resetMark,
AutoRedirectNFQueue: nfQueue,
ExcludeMPTCP: options.ExcludeMPTCP,
Inet4LoopbackAddress: common.Filter(options.LoopbackAddress, netip.Addr.Is4),
Inet6LoopbackAddress: common.Filter(options.LoopbackAddress, netip.Addr.Is6),
StrictRoute: options.StrictRoute,
IncludeInterface: options.IncludeInterface,
ExcludeInterface: options.ExcludeInterface,
Inet4RouteAddress: inet4RouteAddress,
Inet6RouteAddress: inet6RouteAddress,
Inet4RouteExcludeAddress: inet4RouteExcludeAddress,
Inet6RouteExcludeAddress: inet6RouteExcludeAddress,
IncludeUID: includeUID,
ExcludeUID: excludeUID,
IncludeAndroidUser: options.IncludeAndroidUser,
IncludePackage: options.IncludePackage,
ExcludePackage: options.ExcludePackage,
InterfaceMonitor: networkManager.InterfaceMonitor(),
EXP_MultiPendingPackets: multiPendingPackets,
},
udpTimeout: udpTimeout,
stack: options.Stack,

View File

@@ -57,9 +57,7 @@ func NewOutbound(ctx context.Context, router adapter.Router, logger log.ContextL
if err != nil {
return nil, err
}
if outbound.tlsConfig != nil {
outbound.tlsDialer = tls.NewDialer(outboundDialer, outbound.tlsConfig)
}
outbound.tlsDialer = tls.NewDialer(outboundDialer, outbound.tlsConfig)
}
if options.Transport != nil {
outbound.transport, err = v2ray.NewClientTransport(ctx, outbound.dialer, outbound.serverAddr, common.PtrValueOrDefault(options.Transport), outbound.tlsConfig)

View File

@@ -2,6 +2,7 @@ package route
import (
"context"
"errors"
"io"
"net"
"net/netip"
@@ -44,52 +45,16 @@ func (m *ConnectionManager) Start(stage adapter.StartStage) error {
return nil
}
func (m *ConnectionManager) Count() int {
return m.connections.Len()
}
func (m *ConnectionManager) CloseAll() {
m.access.Lock()
var closers []io.Closer
for element := m.connections.Front(); element != nil; {
nextElement := element.Next()
closers = append(closers, element.Value)
m.connections.Remove(element)
element = nextElement
}
m.access.Unlock()
for _, closer := range closers {
common.Close(closer)
}
}
func (m *ConnectionManager) Close() error {
m.CloseAll()
m.access.Lock()
defer m.access.Unlock()
for element := m.connections.Front(); element != nil; element = element.Next() {
common.Close(element.Value)
}
m.connections.Init()
return nil
}
func (m *ConnectionManager) TrackConn(conn net.Conn) net.Conn {
m.access.Lock()
element := m.connections.PushBack(conn)
m.access.Unlock()
return &trackedConn{
Conn: conn,
manager: m,
element: element,
}
}
func (m *ConnectionManager) TrackPacketConn(conn net.PacketConn) net.PacketConn {
m.access.Lock()
element := m.connections.PushBack(conn)
m.access.Unlock()
return &trackedPacketConn{
PacketConn: conn,
manager: m,
element: element,
}
}
func (m *ConnectionManager) NewConnection(ctx context.Context, this N.Dialer, conn net.Conn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) {
ctx = adapter.WithContext(ctx, &metadata)
var (
@@ -128,13 +93,17 @@ func (m *ConnectionManager) NewConnection(ctx context.Context, this N.Dialer, co
if metadata.TLSFragment || metadata.TLSRecordFragment {
remoteConn = tf.NewConn(remoteConn, ctx, metadata.TLSFragment, metadata.TLSRecordFragment, metadata.TLSFragmentFallbackDelay)
}
m.access.Lock()
element := m.connections.PushBack(conn)
m.access.Unlock()
onClose = N.AppendClose(onClose, func(it error) {
m.access.Lock()
defer m.access.Unlock()
m.connections.Remove(element)
})
var done atomic.Bool
if m.kickWriteHandshake(ctx, conn, remoteConn, false, &done, onClose) {
return
}
if m.kickWriteHandshake(ctx, remoteConn, conn, true, &done, onClose) {
return
}
m.preConnectionCopy(ctx, conn, remoteConn, false, &done, onClose)
m.preConnectionCopy(ctx, remoteConn, conn, true, &done, onClose)
go m.connectionCopy(ctx, conn, remoteConn, false, &done, onClose)
go m.connectionCopy(ctx, remoteConn, conn, true, &done, onClose)
}
@@ -244,13 +213,88 @@ func (m *ConnectionManager) NewPacketConnection(ctx context.Context, this N.Dial
ctx, conn = canceler.NewPacketConn(ctx, conn, udpTimeout)
}
destination := bufio.NewPacketConn(remotePacketConn)
m.access.Lock()
element := m.connections.PushBack(conn)
m.access.Unlock()
onClose = N.AppendClose(onClose, func(it error) {
m.access.Lock()
defer m.access.Unlock()
m.connections.Remove(element)
})
var done atomic.Bool
go m.packetConnectionCopy(ctx, conn, destination, false, &done, onClose)
go m.packetConnectionCopy(ctx, destination, conn, true, &done, onClose)
}
func (m *ConnectionManager) preConnectionCopy(ctx context.Context, source net.Conn, destination net.Conn, direction bool, done *atomic.Bool, onClose N.CloseHandlerFunc) {
readHandshake := N.NeedHandshakeForRead(source)
writeHandshake := N.NeedHandshakeForWrite(destination)
if readHandshake || writeHandshake {
var err error
for {
err = m.connectionCopyEarlyWrite(source, destination, readHandshake, writeHandshake)
if err == nil && N.NeedHandshakeForRead(source) {
continue
} else if E.IsMulti(err, os.ErrInvalid, context.DeadlineExceeded, io.EOF) {
err = nil
}
break
}
if err != nil {
if done.Swap(true) {
onClose(err)
}
common.Close(source, destination)
if !direction {
m.logger.ErrorContext(ctx, "connection upload handshake: ", err)
} else {
m.logger.ErrorContext(ctx, "connection download handshake: ", err)
}
return
}
}
}
func (m *ConnectionManager) connectionCopy(ctx context.Context, source net.Conn, destination net.Conn, direction bool, done *atomic.Bool, onClose N.CloseHandlerFunc) {
_, err := bufio.CopyWithIncreateBuffer(destination, source, bufio.DefaultIncreaseBufferAfter, bufio.DefaultBatchSize)
var (
sourceReader io.Reader = source
destinationWriter io.Writer = destination
)
var readCounters, writeCounters []N.CountFunc
for {
sourceReader, readCounters = N.UnwrapCountReader(sourceReader, readCounters)
destinationWriter, writeCounters = N.UnwrapCountWriter(destinationWriter, writeCounters)
if cachedSrc, isCached := sourceReader.(N.CachedReader); isCached {
cachedBuffer := cachedSrc.ReadCached()
if cachedBuffer != nil {
dataLen := cachedBuffer.Len()
_, err := destination.Write(cachedBuffer.Bytes())
cachedBuffer.Release()
if err != nil {
if done.Swap(true) {
onClose(err)
}
common.Close(source, destination)
if !direction {
m.logger.ErrorContext(ctx, "connection upload payload: ", err)
} else {
m.logger.ErrorContext(ctx, "connection download payload: ", err)
}
return
}
for _, counter := range readCounters {
counter(int64(dataLen))
}
for _, counter := range writeCounters {
counter(int64(dataLen))
}
}
continue
}
break
}
_, err := bufio.CopyWithCounters(destinationWriter, sourceReader, source, readCounters, writeCounters, bufio.DefaultIncreaseBufferAfter, bufio.DefaultBatchSize)
if err != nil {
common.Close(source, destination)
} else if duplexDst, isDuplex := destination.(N.WriteCloser); isDuplex {
@@ -262,9 +306,7 @@ func (m *ConnectionManager) connectionCopy(ctx context.Context, source net.Conn,
destination.Close()
}
if done.Swap(true) {
if onClose != nil {
onClose(err)
}
onClose(err)
common.Close(source, destination)
}
if !direction {
@@ -286,56 +328,45 @@ func (m *ConnectionManager) connectionCopy(ctx context.Context, source net.Conn,
}
}
func (m *ConnectionManager) kickWriteHandshake(ctx context.Context, source net.Conn, destination net.Conn, direction bool, done *atomic.Bool, onClose N.CloseHandlerFunc) bool {
if !N.NeedHandshakeForWrite(destination) {
return false
func (m *ConnectionManager) connectionCopyEarlyWrite(source net.Conn, destination io.Writer, readHandshake bool, writeHandshake bool) error {
payload := buf.NewPacket()
defer payload.Release()
err := source.SetReadDeadline(time.Now().Add(C.ReadPayloadTimeout))
if err != nil {
if err == os.ErrInvalid {
if writeHandshake {
return common.Error(destination.Write(nil))
}
}
return err
}
var (
cachedBuffer *buf.Buffer
wrotePayload bool
isTimeout bool
isEOF bool
)
sourceReader, readCounters := N.UnwrapCountReader(source, nil)
destinationWriter, writeCounters := N.UnwrapCountWriter(destination, nil)
if cachedReader, ok := sourceReader.(N.CachedReader); ok {
cachedBuffer = cachedReader.ReadCached()
}
var err error
if cachedBuffer != nil {
wrotePayload = true
dataLen := cachedBuffer.Len()
_, err = destinationWriter.Write(cachedBuffer.Bytes())
cachedBuffer.Release()
if err == nil {
for _, counter := range readCounters {
counter(int64(dataLen))
}
for _, counter := range writeCounters {
counter(int64(dataLen))
}
}
} else {
_ = destination.SetWriteDeadline(time.Now().Add(C.ReadPayloadTimeout))
_, err = destinationWriter.Write(nil)
_ = destination.SetWriteDeadline(time.Time{})
}
if err == nil {
return false
}
if !wrotePayload && (E.IsMulti(err, os.ErrInvalid, context.DeadlineExceeded, io.EOF) || E.IsTimeout(err)) {
return false
}
if !done.Swap(true) {
if onClose != nil {
onClose(err)
_, err = payload.ReadOnceFrom(source)
if err != nil {
if E.IsTimeout(err) {
isTimeout = true
} else if errors.Is(err, io.EOF) {
isEOF = true
} else {
return E.Cause(err, "read payload")
}
}
common.Close(source, destination)
if !direction {
m.logger.ErrorContext(ctx, "connection upload handshake: ", err)
} else {
m.logger.ErrorContext(ctx, "connection download handshake: ", err)
_ = source.SetReadDeadline(time.Time{})
if !payload.IsEmpty() || writeHandshake {
_, err = destination.Write(payload.Bytes())
if err != nil {
return E.Cause(err, "write payload")
}
}
return true
if isTimeout {
return context.DeadlineExceeded
} else if isEOF {
return io.EOF
}
return nil
}
func (m *ConnectionManager) packetConnectionCopy(ctx context.Context, source N.PacketReader, destination N.PacketWriter, direction bool, done *atomic.Bool, onClose N.CloseHandlerFunc) {
@@ -358,59 +389,7 @@ func (m *ConnectionManager) packetConnectionCopy(ctx context.Context, source N.P
}
}
if !done.Swap(true) {
if onClose != nil {
onClose(err)
}
onClose(err)
}
common.Close(source, destination)
}
type trackedConn struct {
net.Conn
manager *ConnectionManager
element *list.Element[io.Closer]
}
func (c *trackedConn) Close() error {
c.manager.access.Lock()
c.manager.connections.Remove(c.element)
c.manager.access.Unlock()
return c.Conn.Close()
}
func (c *trackedConn) Upstream() any {
return c.Conn
}
func (c *trackedConn) ReaderReplaceable() bool {
return true
}
func (c *trackedConn) WriterReplaceable() bool {
return true
}
type trackedPacketConn struct {
net.PacketConn
manager *ConnectionManager
element *list.Element[io.Closer]
}
func (c *trackedPacketConn) Close() error {
c.manager.access.Lock()
c.manager.connections.Remove(c.element)
c.manager.access.Unlock()
return c.PacketConn.Close()
}
func (c *trackedPacketConn) Upstream() any {
return bufio.NewPacketConn(c.PacketConn)
}
func (c *trackedPacketConn) ReaderReplaceable() bool {
return true
}
func (c *trackedPacketConn) WriterReplaceable() bool {
return true
}

View File

@@ -13,6 +13,7 @@ import (
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/conntrack"
"github.com/sagernet/sing-box/common/settings"
"github.com/sagernet/sing-box/common/taskmonitor"
C "github.com/sagernet/sing-box/constant"
@@ -47,7 +48,6 @@ type NetworkManager struct {
powerListener winpowrprof.EventListener
pauseManager pause.Manager
platformInterface adapter.PlatformInterface
connectionManager adapter.ConnectionManager
endpoint adapter.EndpointManager
inbound adapter.InboundManager
outbound adapter.OutboundManager
@@ -90,7 +90,6 @@ func NewNetworkManager(ctx context.Context, logger logger.ContextLogger, options
},
pauseManager: service.FromContext[pause.Manager](ctx),
platformInterface: service.FromContext[adapter.PlatformInterface](ctx),
connectionManager: service.FromContext[adapter.ConnectionManager](ctx),
endpoint: service.FromContext[adapter.EndpointManager](ctx),
inbound: service.FromContext[adapter.InboundManager](ctx),
outbound: service.FromContext[adapter.OutboundManager](ctx),
@@ -451,9 +450,7 @@ func (r *NetworkManager) UpdateWIFIState() {
}
func (r *NetworkManager) ResetNetwork() {
if r.connectionManager != nil {
r.connectionManager.CloseAll()
}
conntrack.Close()
for _, endpoint := range r.endpoint.Endpoints() {
listener, isListener := endpoint.(adapter.InterfaceUpdateListener)

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/conntrack"
"github.com/sagernet/sing-box/common/process"
"github.com/sagernet/sing-box/common/sniff"
C "github.com/sagernet/sing-box/constant"
@@ -79,6 +80,7 @@ func (r *Router) routeConnection(ctx context.Context, conn net.Conn, metadata ad
injectable.NewConnectionEx(ctx, conn, metadata, onClose)
return nil
}
conntrack.KillerCheck()
metadata.Network = N.NetworkTCP
switch metadata.Destination.Fqdn {
case mux.Destination.Fqdn:
@@ -214,6 +216,8 @@ func (r *Router) routePacketConnection(ctx context.Context, conn N.PacketConn, m
injectable.NewPacketConnectionEx(ctx, conn, metadata, onClose)
return nil
}
conntrack.KillerCheck()
// TODO: move to UoT
metadata.Network = N.NetworkUDP

View File

@@ -107,7 +107,9 @@ func (r *abstractDefaultRule) Match(metadata *adapter.InboundContext) bool {
}
for _, item := range r.items {
metadata.DidMatch = true
if _, isRuleSet := item.(*RuleSetItem); !isRuleSet {
metadata.DidMatch = true
}
if !item.Match(metadata) {
return r.invert
}

View File

@@ -1,157 +0,0 @@
package rule
import (
"context"
"testing"
"github.com/sagernet/sing-box/adapter"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing/common/x/list"
"github.com/stretchr/testify/require"
"go4.org/netipx"
)
type fakeRuleSet struct {
matched bool
}
func (f *fakeRuleSet) Name() string {
return "fake-rule-set"
}
func (f *fakeRuleSet) StartContext(context.Context, *adapter.HTTPStartContext) error {
return nil
}
func (f *fakeRuleSet) PostStart() error {
return nil
}
func (f *fakeRuleSet) Metadata() adapter.RuleSetMetadata {
return adapter.RuleSetMetadata{}
}
func (f *fakeRuleSet) ExtractIPSet() []*netipx.IPSet {
return nil
}
func (f *fakeRuleSet) IncRef() {}
func (f *fakeRuleSet) DecRef() {}
func (f *fakeRuleSet) Cleanup() {}
func (f *fakeRuleSet) RegisterCallback(adapter.RuleSetUpdateCallback) *list.Element[adapter.RuleSetUpdateCallback] {
return nil
}
func (f *fakeRuleSet) UnregisterCallback(*list.Element[adapter.RuleSetUpdateCallback]) {}
func (f *fakeRuleSet) Close() error {
return nil
}
func (f *fakeRuleSet) Match(*adapter.InboundContext) bool {
return f.matched
}
func (f *fakeRuleSet) String() string {
return "fake-rule-set"
}
type fakeRuleItem struct {
matched bool
}
func (f *fakeRuleItem) Match(*adapter.InboundContext) bool {
return f.matched
}
func (f *fakeRuleItem) String() string {
return "fake-rule-item"
}
func newRuleSetOnlyRule(ruleSetMatched bool, invert bool) *DefaultRule {
ruleSetItem := &RuleSetItem{
setList: []adapter.RuleSet{&fakeRuleSet{matched: ruleSetMatched}},
}
return &DefaultRule{
abstractDefaultRule: abstractDefaultRule{
items: []RuleItem{ruleSetItem},
allItems: []RuleItem{ruleSetItem},
invert: invert,
},
}
}
func newSingleItemRule(matched bool) *DefaultRule {
item := &fakeRuleItem{matched: matched}
return &DefaultRule{
abstractDefaultRule: abstractDefaultRule{
items: []RuleItem{item},
allItems: []RuleItem{item},
},
}
}
func TestAbstractDefaultRule_RuleSetOnly_InvertFalse(t *testing.T) {
t.Parallel()
require.True(t, newRuleSetOnlyRule(true, false).Match(&adapter.InboundContext{}))
require.False(t, newRuleSetOnlyRule(false, false).Match(&adapter.InboundContext{}))
}
func TestAbstractDefaultRule_RuleSetOnly_InvertTrue(t *testing.T) {
t.Parallel()
require.False(t, newRuleSetOnlyRule(true, true).Match(&adapter.InboundContext{}))
require.True(t, newRuleSetOnlyRule(false, true).Match(&adapter.InboundContext{}))
}
func TestAbstractLogicalRule_And_WithRuleSetInvert(t *testing.T) {
t.Parallel()
testCases := []struct {
name string
aMatched bool
ruleSetBMatch bool
expected bool
}{
{
name: "A true B true",
aMatched: true,
ruleSetBMatch: true,
expected: false,
},
{
name: "A true B false",
aMatched: true,
ruleSetBMatch: false,
expected: true,
},
{
name: "A false B true",
aMatched: false,
ruleSetBMatch: true,
expected: false,
},
{
name: "A false B false",
aMatched: false,
ruleSetBMatch: false,
expected: false,
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
t.Parallel()
logicalRule := &abstractLogicalRule{
mode: C.LogicalTypeAnd,
rules: []adapter.HeadlessRule{
newSingleItemRule(testCase.aMatched),
newRuleSetOnlyRule(testCase.ruleSetBMatch, true),
},
}
require.Equal(t, testCase.expected, logicalRule.Match(&adapter.InboundContext{}))
})
}
}

View File

@@ -10,7 +10,6 @@ import (
"mime"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -80,35 +79,6 @@ func isHopByHopHeader(header string) bool {
}
}
const (
weeklyWindowSeconds = 604800
weeklyWindowMinutes = weeklyWindowSeconds / 60
)
func parseInt64Header(headers http.Header, headerName string) (int64, bool) {
headerValue := strings.TrimSpace(headers.Get(headerName))
if headerValue == "" {
return 0, false
}
parsedValue, parseError := strconv.ParseInt(headerValue, 10, 64)
if parseError != nil {
return 0, false
}
return parsedValue, true
}
func extractWeeklyCycleHint(headers http.Header) *WeeklyCycleHint {
resetAtUnix, hasResetAt := parseInt64Header(headers, "anthropic-ratelimit-unified-7d-reset")
if !hasResetAt || resetAtUnix <= 0 {
return nil
}
return &WeeklyCycleHint{
WindowMinutes: weeklyWindowMinutes,
ResetAt: time.Unix(resetAtUnix, 0).UTC(),
}
}
type Service struct {
boxService.Adapter
ctx context.Context
@@ -422,7 +392,6 @@ func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, response *http.Response, requestModel string, anthropicBetaHeader string, messagesCount int, username string) {
weeklyCycleHint := extractWeeklyCycleHint(response.Header)
mediaType, _, err := mime.ParseMediaType(response.Header.Get("Content-Type"))
isStreaming := err == nil && mediaType == "text/event-stream"
@@ -448,7 +417,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
if usage.InputTokens > 0 || usage.OutputTokens > 0 {
if responseModel != "" {
contextWindow := detectContextWindow(anthropicBetaHeader, usage.InputTokens)
s.usageTracker.AddUsageWithCycleHint(
s.usageTracker.AddUsage(
responseModel,
contextWindow,
messagesCount,
@@ -456,11 +425,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
usage.OutputTokens,
usage.CacheReadInputTokens,
usage.CacheCreationInputTokens,
usage.CacheCreation.Ephemeral5mInputTokens,
usage.CacheCreation.Ephemeral1hInputTokens,
username,
time.Now(),
weeklyCycleHint,
)
}
}
@@ -520,8 +485,6 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
accumulatedUsage.InputTokens = messageStart.Message.Usage.InputTokens
accumulatedUsage.CacheReadInputTokens = messageStart.Message.Usage.CacheReadInputTokens
accumulatedUsage.CacheCreationInputTokens = messageStart.Message.Usage.CacheCreationInputTokens
accumulatedUsage.CacheCreation.Ephemeral5mInputTokens = messageStart.Message.Usage.CacheCreation.Ephemeral5mInputTokens
accumulatedUsage.CacheCreation.Ephemeral1hInputTokens = messageStart.Message.Usage.CacheCreation.Ephemeral1hInputTokens
}
case "message_delta":
messageDelta := event.AsMessageDelta()
@@ -548,7 +511,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
if accumulatedUsage.InputTokens > 0 || accumulatedUsage.OutputTokens > 0 {
if responseModel != "" {
contextWindow := detectContextWindow(anthropicBetaHeader, accumulatedUsage.InputTokens)
s.usageTracker.AddUsageWithCycleHint(
s.usageTracker.AddUsage(
responseModel,
contextWindow,
messagesCount,
@@ -556,11 +519,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
accumulatedUsage.OutputTokens,
accumulatedUsage.CacheReadInputTokens,
accumulatedUsage.CacheCreationInputTokens,
accumulatedUsage.CacheCreation.Ephemeral5mInputTokens,
accumulatedUsage.CacheCreation.Ephemeral1hInputTokens,
username,
time.Now(),
weeklyCycleHint,
)
}
}

View File

@@ -2,7 +2,6 @@ package ccm
import (
"encoding/json"
"fmt"
"math"
"os"
"regexp"
@@ -14,20 +13,17 @@ import (
)
type UsageStats struct {
RequestCount int `json:"request_count"`
MessagesCount int `json:"messages_count"`
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
CacheReadInputTokens int64 `json:"cache_read_input_tokens"`
CacheCreationInputTokens int64 `json:"cache_creation_input_tokens"`
CacheCreation5MinuteInputTokens int64 `json:"cache_creation_5m_input_tokens,omitempty"`
CacheCreation1HourInputTokens int64 `json:"cache_creation_1h_input_tokens,omitempty"`
RequestCount int `json:"request_count"`
MessagesCount int `json:"messages_count"`
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
CacheReadInputTokens int64 `json:"cache_read_input_tokens"`
CacheCreationInputTokens int64 `json:"cache_creation_input_tokens"`
}
type CostCombination struct {
Model string `json:"model"`
ContextWindow int `json:"context_window"`
WeekStartUnix int64 `json:"week_start_unix,omitempty"`
Total UsageStats `json:"total"`
ByUser map[string]UsageStats `json:"by_user"`
}
@@ -45,21 +41,18 @@ type AggregatedUsage struct {
}
type UsageStatsJSON struct {
RequestCount int `json:"request_count"`
MessagesCount int `json:"messages_count"`
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
CacheReadInputTokens int64 `json:"cache_read_input_tokens"`
CacheCreationInputTokens int64 `json:"cache_creation_input_tokens"`
CacheCreation5MinuteInputTokens int64 `json:"cache_creation_5m_input_tokens,omitempty"`
CacheCreation1HourInputTokens int64 `json:"cache_creation_1h_input_tokens,omitempty"`
CostUSD float64 `json:"cost_usd"`
RequestCount int `json:"request_count"`
MessagesCount int `json:"messages_count"`
InputTokens int64 `json:"input_tokens"`
OutputTokens int64 `json:"output_tokens"`
CacheReadInputTokens int64 `json:"cache_read_input_tokens"`
CacheCreationInputTokens int64 `json:"cache_creation_input_tokens"`
CostUSD float64 `json:"cost_usd"`
}
type CostCombinationJSON struct {
Model string `json:"model"`
ContextWindow int `json:"context_window"`
WeekStartUnix int64 `json:"week_start_unix,omitempty"`
Total UsageStatsJSON `json:"total"`
ByUser map[string]UsageStatsJSON `json:"by_user"`
}
@@ -67,7 +60,6 @@ type CostCombinationJSON struct {
type CostsSummaryJSON struct {
TotalUSD float64 `json:"total_usd"`
ByUser map[string]float64 `json:"by_user"`
ByWeek map[string]float64 `json:"by_week,omitempty"`
}
type AggregatedUsageJSON struct {
@@ -76,17 +68,11 @@ type AggregatedUsageJSON struct {
Combinations []CostCombinationJSON `json:"combinations"`
}
type WeeklyCycleHint struct {
WindowMinutes int64
ResetAt time.Time
}
type ModelPricing struct {
InputPrice float64
OutputPrice float64
CacheReadPrice float64
CacheWritePrice5Minute float64
CacheWritePrice1Hour float64
InputPrice float64
OutputPrice float64
CacheReadPrice float64
CacheWritePrice float64
}
type modelFamily struct {
@@ -96,205 +82,143 @@ type modelFamily struct {
}
var (
opus46StandardPricing = ModelPricing{
InputPrice: 5.0,
OutputPrice: 25.0,
CacheReadPrice: 0.5,
CacheWritePrice5Minute: 6.25,
CacheWritePrice1Hour: 10.0,
}
opus46PremiumPricing = ModelPricing{
InputPrice: 10.0,
OutputPrice: 37.5,
CacheReadPrice: 1.0,
CacheWritePrice5Minute: 12.5,
CacheWritePrice1Hour: 20.0,
}
opus45Pricing = ModelPricing{
InputPrice: 5.0,
OutputPrice: 25.0,
CacheReadPrice: 0.5,
CacheWritePrice5Minute: 6.25,
CacheWritePrice1Hour: 10.0,
}
opus4Pricing = ModelPricing{
InputPrice: 15.0,
OutputPrice: 75.0,
CacheReadPrice: 1.5,
CacheWritePrice5Minute: 18.75,
CacheWritePrice1Hour: 30.0,
}
sonnet46StandardPricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice5Minute: 3.75,
CacheWritePrice1Hour: 6.0,
}
sonnet46PremiumPricing = ModelPricing{
InputPrice: 6.0,
OutputPrice: 22.5,
CacheReadPrice: 0.6,
CacheWritePrice5Minute: 7.5,
CacheWritePrice1Hour: 12.0,
}
sonnet45StandardPricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice5Minute: 3.75,
CacheWritePrice1Hour: 6.0,
}
sonnet45PremiumPricing = ModelPricing{
InputPrice: 6.0,
OutputPrice: 22.5,
CacheReadPrice: 0.6,
CacheWritePrice5Minute: 7.5,
CacheWritePrice1Hour: 12.0,
InputPrice: 15.0,
OutputPrice: 75.0,
CacheReadPrice: 1.5,
CacheWritePrice: 18.75,
}
sonnet4StandardPricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice5Minute: 3.75,
CacheWritePrice1Hour: 6.0,
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice: 3.75,
}
sonnet4PremiumPricing = ModelPricing{
InputPrice: 6.0,
OutputPrice: 22.5,
CacheReadPrice: 0.6,
CacheWritePrice5Minute: 7.5,
CacheWritePrice1Hour: 12.0,
}
sonnet37Pricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice5Minute: 3.75,
CacheWritePrice1Hour: 6.0,
}
sonnet35Pricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice5Minute: 3.75,
CacheWritePrice1Hour: 6.0,
}
haiku45Pricing = ModelPricing{
InputPrice: 1.0,
OutputPrice: 5.0,
CacheReadPrice: 0.1,
CacheWritePrice5Minute: 1.25,
CacheWritePrice1Hour: 2.0,
InputPrice: 6.0,
OutputPrice: 22.5,
CacheReadPrice: 0.6,
CacheWritePrice: 7.5,
}
haiku4Pricing = ModelPricing{
InputPrice: 1.0,
OutputPrice: 5.0,
CacheReadPrice: 0.1,
CacheWritePrice5Minute: 1.25,
CacheWritePrice1Hour: 2.0,
InputPrice: 1.0,
OutputPrice: 5.0,
CacheReadPrice: 0.1,
CacheWritePrice: 1.25,
}
haiku35Pricing = ModelPricing{
InputPrice: 0.8,
OutputPrice: 4.0,
CacheReadPrice: 0.08,
CacheWritePrice5Minute: 1.0,
CacheWritePrice1Hour: 1.6,
InputPrice: 0.8,
OutputPrice: 4.0,
CacheReadPrice: 0.08,
CacheWritePrice: 1.0,
}
sonnet35Pricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice: 3.75,
}
opus45Pricing = ModelPricing{
InputPrice: 5.0,
OutputPrice: 25.0,
CacheReadPrice: 0.5,
CacheWritePrice: 6.25,
}
sonnet45StandardPricing = ModelPricing{
InputPrice: 3.0,
OutputPrice: 15.0,
CacheReadPrice: 0.3,
CacheWritePrice: 3.75,
}
sonnet45PremiumPricing = ModelPricing{
InputPrice: 6.0,
OutputPrice: 22.5,
CacheReadPrice: 0.6,
CacheWritePrice: 7.5,
}
haiku45Pricing = ModelPricing{
InputPrice: 1.0,
OutputPrice: 5.0,
CacheReadPrice: 0.1,
CacheWritePrice: 1.25,
}
haiku3Pricing = ModelPricing{
InputPrice: 0.25,
OutputPrice: 1.25,
CacheReadPrice: 0.03,
CacheWritePrice5Minute: 0.3,
CacheWritePrice1Hour: 0.5,
InputPrice: 0.25,
OutputPrice: 1.25,
CacheReadPrice: 0.03,
CacheWritePrice: 0.3,
}
opus3Pricing = ModelPricing{
InputPrice: 15.0,
OutputPrice: 75.0,
CacheReadPrice: 1.5,
CacheWritePrice5Minute: 18.75,
CacheWritePrice1Hour: 30.0,
InputPrice: 15.0,
OutputPrice: 75.0,
CacheReadPrice: 1.5,
CacheWritePrice: 18.75,
}
modelFamilies = []modelFamily{
{
pattern: regexp.MustCompile(`^claude-opus-4-6(?:-|$)`),
standardPricing: opus46StandardPricing,
premiumPricing: &opus46PremiumPricing,
},
{
pattern: regexp.MustCompile(`^claude-opus-4-5(?:-|$)`),
pattern: regexp.MustCompile(`^claude-opus-4-5-`),
standardPricing: opus45Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-(?:opus-4(?:-|$)|4-opus-)`),
pattern: regexp.MustCompile(`^claude-(?:opus-4-|4-opus-|opus-4-1-)`),
standardPricing: opus4Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-(?:opus-3(?:-|$)|3-opus-)`),
pattern: regexp.MustCompile(`^claude-(?:opus-3-|3-opus-)`),
standardPricing: opus3Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-(?:sonnet-4-6(?:-|$)|4-6-sonnet-)`),
standardPricing: sonnet46StandardPricing,
premiumPricing: &sonnet46PremiumPricing,
},
{
pattern: regexp.MustCompile(`^claude-(?:sonnet-4-5(?:-|$)|4-5-sonnet-)`),
pattern: regexp.MustCompile(`^claude-(?:sonnet-4-5-|4-5-sonnet-)`),
standardPricing: sonnet45StandardPricing,
premiumPricing: &sonnet45PremiumPricing,
},
{
pattern: regexp.MustCompile(`^claude-(?:sonnet-4(?:-|$)|4-sonnet-)`),
pattern: regexp.MustCompile(`^claude-3-7-sonnet-`),
standardPricing: sonnet4StandardPricing,
premiumPricing: &sonnet4PremiumPricing,
},
{
pattern: regexp.MustCompile(`^claude-3-7-sonnet(?:-|$)`),
standardPricing: sonnet37Pricing,
premiumPricing: nil,
pattern: regexp.MustCompile(`^claude-(?:sonnet-4-|4-sonnet-)`),
standardPricing: sonnet4StandardPricing,
premiumPricing: &sonnet4PremiumPricing,
},
{
pattern: regexp.MustCompile(`^claude-3-5-sonnet(?:-|$)`),
pattern: regexp.MustCompile(`^claude-3-5-sonnet-`),
standardPricing: sonnet35Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-(?:haiku-4-5(?:-|$)|4-5-haiku-)`),
pattern: regexp.MustCompile(`^claude-(?:haiku-4-5-|4-5-haiku-)`),
standardPricing: haiku45Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-haiku-4(?:-|$)`),
pattern: regexp.MustCompile(`^claude-haiku-4-`),
standardPricing: haiku4Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-3-5-haiku(?:-|$)`),
pattern: regexp.MustCompile(`^claude-3-5-haiku-`),
standardPricing: haiku35Pricing,
premiumPricing: nil,
},
{
pattern: regexp.MustCompile(`^claude-3-haiku(?:-|$)`),
pattern: regexp.MustCompile(`^claude-3-haiku-`),
standardPricing: haiku3Pricing,
premiumPricing: nil,
},
@@ -319,211 +243,68 @@ func getPricing(model string, contextWindow int) ModelPricing {
func calculateCost(stats UsageStats, model string, contextWindow int) float64 {
pricing := getPricing(model, contextWindow)
cacheCreationCost := 0.0
if stats.CacheCreation5MinuteInputTokens > 0 || stats.CacheCreation1HourInputTokens > 0 {
cacheCreationCost = float64(stats.CacheCreation5MinuteInputTokens)*pricing.CacheWritePrice5Minute +
float64(stats.CacheCreation1HourInputTokens)*pricing.CacheWritePrice1Hour
} else {
// Backward compatibility for usage files generated before TTL split tracking.
cacheCreationCost = float64(stats.CacheCreationInputTokens) * pricing.CacheWritePrice5Minute
}
cost := (float64(stats.InputTokens)*pricing.InputPrice +
float64(stats.OutputTokens)*pricing.OutputPrice +
float64(stats.CacheReadInputTokens)*pricing.CacheReadPrice +
cacheCreationCost) / 1_000_000
float64(stats.CacheCreationInputTokens)*pricing.CacheWritePrice) / 1_000_000
return math.Round(cost*100) / 100
}
func roundCost(cost float64) float64 {
return math.Round(cost*100) / 100
}
func normalizeCombinations(combinations []CostCombination) {
for index := range combinations {
if combinations[index].ByUser == nil {
combinations[index].ByUser = make(map[string]UsageStats)
}
}
}
func addUsageToCombinations(
combinations *[]CostCombination,
model string,
contextWindow int,
weekStartUnix int64,
messagesCount int,
inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, cacheCreation5MinuteTokens, cacheCreation1HourTokens int64,
user string,
) {
var matchedCombination *CostCombination
for index := range *combinations {
combination := &(*combinations)[index]
if combination.Model == model && combination.ContextWindow == contextWindow && combination.WeekStartUnix == weekStartUnix {
matchedCombination = combination
break
}
}
if matchedCombination == nil {
newCombination := CostCombination{
Model: model,
ContextWindow: contextWindow,
WeekStartUnix: weekStartUnix,
Total: UsageStats{},
ByUser: make(map[string]UsageStats),
}
*combinations = append(*combinations, newCombination)
matchedCombination = &(*combinations)[len(*combinations)-1]
}
if cacheCreationTokens == 0 {
cacheCreationTokens = cacheCreation5MinuteTokens + cacheCreation1HourTokens
}
matchedCombination.Total.RequestCount++
matchedCombination.Total.MessagesCount += messagesCount
matchedCombination.Total.InputTokens += inputTokens
matchedCombination.Total.OutputTokens += outputTokens
matchedCombination.Total.CacheReadInputTokens += cacheReadTokens
matchedCombination.Total.CacheCreationInputTokens += cacheCreationTokens
matchedCombination.Total.CacheCreation5MinuteInputTokens += cacheCreation5MinuteTokens
matchedCombination.Total.CacheCreation1HourInputTokens += cacheCreation1HourTokens
if user != "" {
userStats := matchedCombination.ByUser[user]
userStats.RequestCount++
userStats.MessagesCount += messagesCount
userStats.InputTokens += inputTokens
userStats.OutputTokens += outputTokens
userStats.CacheReadInputTokens += cacheReadTokens
userStats.CacheCreationInputTokens += cacheCreationTokens
userStats.CacheCreation5MinuteInputTokens += cacheCreation5MinuteTokens
userStats.CacheCreation1HourInputTokens += cacheCreation1HourTokens
matchedCombination.ByUser[user] = userStats
}
}
func buildCombinationJSON(combinations []CostCombination, aggregateUserCosts map[string]float64) ([]CostCombinationJSON, float64) {
result := make([]CostCombinationJSON, len(combinations))
var totalCost float64
for index, combination := range combinations {
combinationTotalCost := calculateCost(combination.Total, combination.Model, combination.ContextWindow)
totalCost += combinationTotalCost
combinationJSON := CostCombinationJSON{
Model: combination.Model,
ContextWindow: combination.ContextWindow,
WeekStartUnix: combination.WeekStartUnix,
Total: UsageStatsJSON{
RequestCount: combination.Total.RequestCount,
MessagesCount: combination.Total.MessagesCount,
InputTokens: combination.Total.InputTokens,
OutputTokens: combination.Total.OutputTokens,
CacheReadInputTokens: combination.Total.CacheReadInputTokens,
CacheCreationInputTokens: combination.Total.CacheCreationInputTokens,
CacheCreation5MinuteInputTokens: combination.Total.CacheCreation5MinuteInputTokens,
CacheCreation1HourInputTokens: combination.Total.CacheCreation1HourInputTokens,
CostUSD: combinationTotalCost,
},
ByUser: make(map[string]UsageStatsJSON),
}
for user, userStats := range combination.ByUser {
userCost := calculateCost(userStats, combination.Model, combination.ContextWindow)
if aggregateUserCosts != nil {
aggregateUserCosts[user] += userCost
}
combinationJSON.ByUser[user] = UsageStatsJSON{
RequestCount: userStats.RequestCount,
MessagesCount: userStats.MessagesCount,
InputTokens: userStats.InputTokens,
OutputTokens: userStats.OutputTokens,
CacheReadInputTokens: userStats.CacheReadInputTokens,
CacheCreationInputTokens: userStats.CacheCreationInputTokens,
CacheCreation5MinuteInputTokens: userStats.CacheCreation5MinuteInputTokens,
CacheCreation1HourInputTokens: userStats.CacheCreation1HourInputTokens,
CostUSD: userCost,
}
}
result[index] = combinationJSON
}
return result, roundCost(totalCost)
}
func formatUTCOffsetLabel(timestamp time.Time) string {
_, offsetSeconds := timestamp.Zone()
sign := "+"
if offsetSeconds < 0 {
sign = "-"
offsetSeconds = -offsetSeconds
}
offsetHours := offsetSeconds / 3600
offsetMinutes := (offsetSeconds % 3600) / 60
if offsetMinutes == 0 {
return fmt.Sprintf("UTC%s%d", sign, offsetHours)
}
return fmt.Sprintf("UTC%s%d:%02d", sign, offsetHours, offsetMinutes)
}
func formatWeekStartKey(cycleStartAt time.Time) string {
localCycleStart := cycleStartAt.In(time.Local)
return fmt.Sprintf("%s %s", localCycleStart.Format("2006-01-02 15:04:05"), formatUTCOffsetLabel(localCycleStart))
}
func buildByWeekCost(combinations []CostCombination) map[string]float64 {
byWeek := make(map[string]float64)
for _, combination := range combinations {
if combination.WeekStartUnix <= 0 {
continue
}
weekStartAt := time.Unix(combination.WeekStartUnix, 0).UTC()
weekKey := formatWeekStartKey(weekStartAt)
byWeek[weekKey] += calculateCost(combination.Total, combination.Model, combination.ContextWindow)
}
for weekKey, weekCost := range byWeek {
byWeek[weekKey] = roundCost(weekCost)
}
return byWeek
}
func deriveWeekStartUnix(cycleHint *WeeklyCycleHint) int64 {
if cycleHint == nil || cycleHint.WindowMinutes <= 0 || cycleHint.ResetAt.IsZero() {
return 0
}
windowDuration := time.Duration(cycleHint.WindowMinutes) * time.Minute
return cycleHint.ResetAt.UTC().Add(-windowDuration).Unix()
}
func (u *AggregatedUsage) ToJSON() *AggregatedUsageJSON {
u.mutex.Lock()
defer u.mutex.Unlock()
result := &AggregatedUsageJSON{
LastUpdated: u.LastUpdated,
LastUpdated: u.LastUpdated,
Combinations: make([]CostCombinationJSON, len(u.Combinations)),
Costs: CostsSummaryJSON{
TotalUSD: 0,
ByUser: make(map[string]float64),
ByWeek: make(map[string]float64),
},
}
globalCombinationsJSON, totalCost := buildCombinationJSON(u.Combinations, result.Costs.ByUser)
result.Combinations = globalCombinationsJSON
result.Costs.TotalUSD = totalCost
result.Costs.ByWeek = buildByWeekCost(u.Combinations)
for i, combo := range u.Combinations {
totalCost := calculateCost(combo.Total, combo.Model, combo.ContextWindow)
if len(result.Costs.ByWeek) == 0 {
result.Costs.ByWeek = nil
result.Costs.TotalUSD += totalCost
comboJSON := CostCombinationJSON{
Model: combo.Model,
ContextWindow: combo.ContextWindow,
Total: UsageStatsJSON{
RequestCount: combo.Total.RequestCount,
MessagesCount: combo.Total.MessagesCount,
InputTokens: combo.Total.InputTokens,
OutputTokens: combo.Total.OutputTokens,
CacheReadInputTokens: combo.Total.CacheReadInputTokens,
CacheCreationInputTokens: combo.Total.CacheCreationInputTokens,
CostUSD: totalCost,
},
ByUser: make(map[string]UsageStatsJSON),
}
for user, userStats := range combo.ByUser {
userCost := calculateCost(userStats, combo.Model, combo.ContextWindow)
result.Costs.ByUser[user] += userCost
comboJSON.ByUser[user] = UsageStatsJSON{
RequestCount: userStats.RequestCount,
MessagesCount: userStats.MessagesCount,
InputTokens: userStats.InputTokens,
OutputTokens: userStats.OutputTokens,
CacheReadInputTokens: userStats.CacheReadInputTokens,
CacheCreationInputTokens: userStats.CacheCreationInputTokens,
CostUSD: userCost,
}
}
result.Combinations[i] = comboJSON
}
result.Costs.TotalUSD = math.Round(result.Costs.TotalUSD*100) / 100
for user, cost := range result.Costs.ByUser {
result.Costs.ByUser[user] = roundCost(cost)
result.Costs.ByUser[user] = math.Round(cost*100) / 100
}
return result
@@ -533,9 +314,6 @@ func (u *AggregatedUsage) Load() error {
u.mutex.Lock()
defer u.mutex.Unlock()
u.LastUpdated = time.Time{}
u.Combinations = nil
data, err := os.ReadFile(u.filePath)
if err != nil {
if os.IsNotExist(err) {
@@ -556,7 +334,12 @@ func (u *AggregatedUsage) Load() error {
u.LastUpdated = temp.LastUpdated
u.Combinations = temp.Combinations
normalizeCombinations(u.Combinations)
for i := range u.Combinations {
if u.Combinations[i].ByUser == nil {
u.Combinations[i].ByUser = make(map[string]UsageStats)
}
}
return nil
}
@@ -584,42 +367,58 @@ func (u *AggregatedUsage) Save() error {
return err
}
func (u *AggregatedUsage) AddUsage(
model string,
contextWindow int,
messagesCount int,
inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, cacheCreation5MinuteTokens, cacheCreation1HourTokens int64,
user string,
) error {
return u.AddUsageWithCycleHint(model, contextWindow, messagesCount, inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, cacheCreation5MinuteTokens, cacheCreation1HourTokens, user, time.Now(), nil)
}
func (u *AggregatedUsage) AddUsageWithCycleHint(
model string,
contextWindow int,
messagesCount int,
inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, cacheCreation5MinuteTokens, cacheCreation1HourTokens int64,
user string,
observedAt time.Time,
cycleHint *WeeklyCycleHint,
) error {
func (u *AggregatedUsage) AddUsage(model string, contextWindow int, messagesCount int, inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens int64, user string) error {
if model == "" {
return E.New("model cannot be empty")
}
if contextWindow <= 0 {
return E.New("contextWindow must be positive")
}
if observedAt.IsZero() {
observedAt = time.Now()
}
u.mutex.Lock()
defer u.mutex.Unlock()
u.LastUpdated = observedAt
weekStartUnix := deriveWeekStartUnix(cycleHint)
u.LastUpdated = time.Now()
addUsageToCombinations(&u.Combinations, model, contextWindow, weekStartUnix, messagesCount, inputTokens, outputTokens, cacheReadTokens, cacheCreationTokens, cacheCreation5MinuteTokens, cacheCreation1HourTokens, user)
// Find or create combination
var combo *CostCombination
for i := range u.Combinations {
if u.Combinations[i].Model == model && u.Combinations[i].ContextWindow == contextWindow {
combo = &u.Combinations[i]
break
}
}
if combo == nil {
newCombo := CostCombination{
Model: model,
ContextWindow: contextWindow,
Total: UsageStats{},
ByUser: make(map[string]UsageStats),
}
u.Combinations = append(u.Combinations, newCombo)
combo = &u.Combinations[len(u.Combinations)-1]
}
// Update total stats
combo.Total.RequestCount++
combo.Total.MessagesCount += messagesCount
combo.Total.InputTokens += inputTokens
combo.Total.OutputTokens += outputTokens
combo.Total.CacheReadInputTokens += cacheReadTokens
combo.Total.CacheCreationInputTokens += cacheCreationTokens
// Update per-user stats if user is specified
if user != "" {
userStats := combo.ByUser[user]
userStats.RequestCount++
userStats.MessagesCount += messagesCount
userStats.InputTokens += inputTokens
userStats.OutputTokens += outputTokens
userStats.CacheReadInputTokens += cacheReadTokens
userStats.CacheCreationInputTokens += cacheCreationTokens
combo.ByUser[user] = userStats
}
go u.scheduleSave()

View File

@@ -10,7 +10,6 @@ import (
"mime"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -72,57 +71,6 @@ func isHopByHopHeader(header string) bool {
}
}
func normalizeRateLimitIdentifier(limitIdentifier string) string {
trimmedIdentifier := strings.TrimSpace(strings.ToLower(limitIdentifier))
if trimmedIdentifier == "" {
return ""
}
return strings.ReplaceAll(trimmedIdentifier, "_", "-")
}
func parseInt64Header(headers http.Header, headerName string) (int64, bool) {
headerValue := strings.TrimSpace(headers.Get(headerName))
if headerValue == "" {
return 0, false
}
parsedValue, parseError := strconv.ParseInt(headerValue, 10, 64)
if parseError != nil {
return 0, false
}
return parsedValue, true
}
func weeklyCycleHintForLimit(headers http.Header, limitIdentifier string) *WeeklyCycleHint {
normalizedLimitIdentifier := normalizeRateLimitIdentifier(limitIdentifier)
if normalizedLimitIdentifier == "" {
return nil
}
windowHeader := "x-" + normalizedLimitIdentifier + "-secondary-window-minutes"
resetHeader := "x-" + normalizedLimitIdentifier + "-secondary-reset-at"
windowMinutes, hasWindowMinutes := parseInt64Header(headers, windowHeader)
resetAtUnix, hasResetAt := parseInt64Header(headers, resetHeader)
if !hasWindowMinutes || !hasResetAt || windowMinutes <= 0 || resetAtUnix <= 0 {
return nil
}
return &WeeklyCycleHint{
WindowMinutes: windowMinutes,
ResetAt: time.Unix(resetAtUnix, 0).UTC(),
}
}
func extractWeeklyCycleHint(headers http.Header) *WeeklyCycleHint {
activeLimitIdentifier := normalizeRateLimitIdentifier(headers.Get("x-codex-active-limit"))
if activeLimitIdentifier != "" {
if activeHint := weeklyCycleHintForLimit(headers, activeLimitIdentifier); activeHint != nil {
return activeHint
}
}
return weeklyCycleHintForLimit(headers, "codex")
}
type Service struct {
boxService.Adapter
ctx context.Context
@@ -456,12 +404,9 @@ func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, response *http.Response, path string, requestModel string, username string) {
isChatCompletions := path == "/v1/chat/completions"
weeklyCycleHint := extractWeeklyCycleHint(response.Header)
mediaType, _, err := mime.ParseMediaType(response.Header.Get("Content-Type"))
isStreaming := err == nil && mediaType == "text/event-stream"
if !isStreaming && !isChatCompletions && response.Header.Get("Content-Type") == "" {
isStreaming = true
}
if !isStreaming {
bodyBytes, err := io.ReadAll(response.Body)
if err != nil {
@@ -469,14 +414,13 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
return
}
var responseModel, serviceTier string
var responseModel string
var inputTokens, outputTokens, cachedTokens int64
if isChatCompletions {
var chatCompletion openai.ChatCompletion
if json.Unmarshal(bodyBytes, &chatCompletion) == nil {
responseModel = chatCompletion.Model
serviceTier = string(chatCompletion.ServiceTier)
inputTokens = chatCompletion.Usage.PromptTokens
outputTokens = chatCompletion.Usage.CompletionTokens
cachedTokens = chatCompletion.Usage.PromptTokensDetails.CachedTokens
@@ -485,7 +429,6 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
var responsesResponse responses.Response
if json.Unmarshal(bodyBytes, &responsesResponse) == nil {
responseModel = string(responsesResponse.Model)
serviceTier = string(responsesResponse.ServiceTier)
inputTokens = responsesResponse.Usage.InputTokens
outputTokens = responsesResponse.Usage.OutputTokens
cachedTokens = responsesResponse.Usage.InputTokensDetails.CachedTokens
@@ -497,16 +440,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
responseModel = requestModel
}
if responseModel != "" {
s.usageTracker.AddUsageWithCycleHint(
responseModel,
inputTokens,
outputTokens,
cachedTokens,
serviceTier,
username,
time.Now(),
weeklyCycleHint,
)
s.usageTracker.AddUsage(responseModel, inputTokens, outputTokens, cachedTokens, username)
}
}
@@ -521,7 +455,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
}
var inputTokens, outputTokens, cachedTokens int64
var responseModel, serviceTier string
var responseModel string
buffer := make([]byte, buf.BufferSize)
var leftover []byte
@@ -556,9 +490,6 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
if chatChunk.Model != "" {
responseModel = chatChunk.Model
}
if chatChunk.ServiceTier != "" {
serviceTier = string(chatChunk.ServiceTier)
}
if chatChunk.Usage.PromptTokens > 0 {
inputTokens = chatChunk.Usage.PromptTokens
cachedTokens = chatChunk.Usage.PromptTokensDetails.CachedTokens
@@ -575,9 +506,6 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
if string(completedEvent.Response.Model) != "" {
responseModel = string(completedEvent.Response.Model)
}
if completedEvent.Response.ServiceTier != "" {
serviceTier = string(completedEvent.Response.ServiceTier)
}
if completedEvent.Response.Usage.InputTokens > 0 {
inputTokens = completedEvent.Response.Usage.InputTokens
cachedTokens = completedEvent.Response.Usage.InputTokensDetails.CachedTokens
@@ -606,16 +534,7 @@ func (s *Service) handleResponseWithTracking(writer http.ResponseWriter, respons
if inputTokens > 0 || outputTokens > 0 {
if responseModel != "" {
s.usageTracker.AddUsageWithCycleHint(
responseModel,
inputTokens,
outputTokens,
cachedTokens,
serviceTier,
username,
time.Now(),
weeklyCycleHint,
)
s.usageTracker.AddUsage(responseModel, inputTokens, outputTokens, cachedTokens, username)
}
}
return

View File

@@ -2,11 +2,9 @@ package ocm
import (
"encoding/json"
"fmt"
"math"
"os"
"regexp"
"strings"
"sync"
"time"
@@ -44,11 +42,9 @@ func (u *UsageStats) UnmarshalJSON(data []byte) error {
}
type CostCombination struct {
Model string `json:"model"`
ServiceTier string `json:"service_tier,omitempty"`
WeekStartUnix int64 `json:"week_start_unix,omitempty"`
Total UsageStats `json:"total"`
ByUser map[string]UsageStats `json:"by_user"`
Model string `json:"model"`
Total UsageStats `json:"total"`
ByUser map[string]UsageStats `json:"by_user"`
}
type AggregatedUsage struct {
@@ -72,17 +68,14 @@ type UsageStatsJSON struct {
}
type CostCombinationJSON struct {
Model string `json:"model"`
ServiceTier string `json:"service_tier,omitempty"`
WeekStartUnix int64 `json:"week_start_unix,omitempty"`
Total UsageStatsJSON `json:"total"`
ByUser map[string]UsageStatsJSON `json:"by_user"`
Model string `json:"model"`
Total UsageStatsJSON `json:"total"`
ByUser map[string]UsageStatsJSON `json:"by_user"`
}
type CostsSummaryJSON struct {
TotalUSD float64 `json:"total_usd"`
ByUser map[string]float64 `json:"by_user"`
ByWeek map[string]float64 `json:"by_week,omitempty"`
}
type AggregatedUsageJSON struct {
@@ -91,11 +84,6 @@ type AggregatedUsageJSON struct {
Combinations []CostCombinationJSON `json:"combinations"`
}
type WeeklyCycleHint struct {
WindowMinutes int64
ResetAt time.Time
}
type ModelPricing struct {
InputPrice float64
OutputPrice float64
@@ -107,123 +95,7 @@ type modelFamily struct {
pricing ModelPricing
}
const (
serviceTierAuto = "auto"
serviceTierDefault = "default"
serviceTierFlex = "flex"
serviceTierPriority = "priority"
serviceTierScale = "scale"
)
var (
gpt52Pricing = ModelPricing{
InputPrice: 1.75,
OutputPrice: 14.0,
CachedInputPrice: 0.175,
}
gpt5Pricing = ModelPricing{
InputPrice: 1.25,
OutputPrice: 10.0,
CachedInputPrice: 0.125,
}
gpt5MiniPricing = ModelPricing{
InputPrice: 0.25,
OutputPrice: 2.0,
CachedInputPrice: 0.025,
}
gpt5NanoPricing = ModelPricing{
InputPrice: 0.05,
OutputPrice: 0.4,
CachedInputPrice: 0.005,
}
gpt52CodexPricing = ModelPricing{
InputPrice: 1.75,
OutputPrice: 14.0,
CachedInputPrice: 0.175,
}
gpt51CodexPricing = ModelPricing{
InputPrice: 1.25,
OutputPrice: 10.0,
CachedInputPrice: 0.125,
}
gpt51CodexMiniPricing = ModelPricing{
InputPrice: 0.25,
OutputPrice: 2.0,
CachedInputPrice: 0.025,
}
gpt52ProPricing = ModelPricing{
InputPrice: 21.0,
OutputPrice: 168.0,
CachedInputPrice: 21.0,
}
gpt5ProPricing = ModelPricing{
InputPrice: 15.0,
OutputPrice: 120.0,
CachedInputPrice: 15.0,
}
gpt52FlexPricing = ModelPricing{
InputPrice: 0.875,
OutputPrice: 7.0,
CachedInputPrice: 0.0875,
}
gpt5FlexPricing = ModelPricing{
InputPrice: 0.625,
OutputPrice: 5.0,
CachedInputPrice: 0.0625,
}
gpt5MiniFlexPricing = ModelPricing{
InputPrice: 0.125,
OutputPrice: 1.0,
CachedInputPrice: 0.0125,
}
gpt5NanoFlexPricing = ModelPricing{
InputPrice: 0.025,
OutputPrice: 0.2,
CachedInputPrice: 0.0025,
}
gpt52PriorityPricing = ModelPricing{
InputPrice: 3.5,
OutputPrice: 28.0,
CachedInputPrice: 0.35,
}
gpt5PriorityPricing = ModelPricing{
InputPrice: 2.5,
OutputPrice: 20.0,
CachedInputPrice: 0.25,
}
gpt5MiniPriorityPricing = ModelPricing{
InputPrice: 0.45,
OutputPrice: 3.6,
CachedInputPrice: 0.045,
}
gpt52CodexPriorityPricing = ModelPricing{
InputPrice: 3.5,
OutputPrice: 28.0,
CachedInputPrice: 0.35,
}
gpt51CodexPriorityPricing = ModelPricing{
InputPrice: 2.5,
OutputPrice: 20.0,
CachedInputPrice: 0.25,
}
gpt4oPricing = ModelPricing{
InputPrice: 2.5,
OutputPrice: 10.0,
@@ -239,19 +111,7 @@ var (
gpt4oAudioPricing = ModelPricing{
InputPrice: 2.5,
OutputPrice: 10.0,
CachedInputPrice: 2.5,
}
gpt4oMiniAudioPricing = ModelPricing{
InputPrice: 0.15,
OutputPrice: 0.6,
CachedInputPrice: 0.15,
}
gptAudioMiniPricing = ModelPricing{
InputPrice: 0.6,
OutputPrice: 2.4,
CachedInputPrice: 0.6,
CachedInputPrice: 1.25,
}
o1Pricing = ModelPricing{
@@ -260,12 +120,6 @@ var (
CachedInputPrice: 7.5,
}
o1ProPricing = ModelPricing{
InputPrice: 150.0,
OutputPrice: 600.0,
CachedInputPrice: 150.0,
}
o1MiniPricing = ModelPricing{
InputPrice: 1.1,
OutputPrice: 4.4,
@@ -281,55 +135,13 @@ var (
o3Pricing = ModelPricing{
InputPrice: 2.0,
OutputPrice: 8.0,
CachedInputPrice: 0.5,
}
o3ProPricing = ModelPricing{
InputPrice: 20.0,
OutputPrice: 80.0,
CachedInputPrice: 20.0,
}
o3DeepResearchPricing = ModelPricing{
InputPrice: 10.0,
OutputPrice: 40.0,
CachedInputPrice: 2.5,
CachedInputPrice: 1.0,
}
o4MiniPricing = ModelPricing{
InputPrice: 1.1,
OutputPrice: 4.4,
CachedInputPrice: 0.275,
}
o4MiniDeepResearchPricing = ModelPricing{
InputPrice: 2.0,
OutputPrice: 8.0,
CachedInputPrice: 0.5,
}
o3FlexPricing = ModelPricing{
InputPrice: 1.0,
OutputPrice: 4.0,
CachedInputPrice: 0.25,
}
o4MiniFlexPricing = ModelPricing{
InputPrice: 0.55,
OutputPrice: 2.2,
CachedInputPrice: 0.138,
}
o3PriorityPricing = ModelPricing{
InputPrice: 3.5,
OutputPrice: 14.0,
CachedInputPrice: 0.875,
}
o4MiniPriorityPricing = ModelPricing{
InputPrice: 2.0,
OutputPrice: 8.0,
CachedInputPrice: 0.5,
CachedInputPrice: 0.55,
}
gpt41Pricing = ModelPricing{
@@ -350,374 +162,69 @@ var (
CachedInputPrice: 0.025,
}
gpt41PriorityPricing = ModelPricing{
InputPrice: 3.5,
OutputPrice: 14.0,
CachedInputPrice: 0.875,
}
gpt41MiniPriorityPricing = ModelPricing{
InputPrice: 0.7,
OutputPrice: 2.8,
CachedInputPrice: 0.175,
}
gpt41NanoPriorityPricing = ModelPricing{
InputPrice: 0.2,
OutputPrice: 0.8,
CachedInputPrice: 0.05,
}
gpt4oPriorityPricing = ModelPricing{
InputPrice: 4.25,
OutputPrice: 17.0,
CachedInputPrice: 2.125,
}
gpt4oMiniPriorityPricing = ModelPricing{
InputPrice: 0.25,
OutputPrice: 1.0,
CachedInputPrice: 0.125,
}
standardModelFamilies = []modelFamily{
modelFamilies = []modelFamily{
{
pattern: regexp.MustCompile(`^gpt-5\.3-codex(?:$|-)`),
pricing: gpt52CodexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2-codex(?:$|-)`),
pricing: gpt52CodexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-codex-max(?:$|-)`),
pricing: gpt51CodexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-codex-mini(?:$|-)`),
pricing: gpt51CodexMiniPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-codex(?:$|-)`),
pricing: gpt51CodexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-codex-mini(?:$|-)`),
pricing: gpt51CodexMiniPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-codex(?:$|-)`),
pricing: gpt51CodexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2-chat-latest$`),
pricing: gpt52Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-chat-latest$`),
pricing: gpt5Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-chat-latest$`),
pricing: gpt5Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2-pro(?:$|-)`),
pricing: gpt52ProPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-pro(?:$|-)`),
pricing: gpt5ProPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-mini(?:$|-)`),
pricing: gpt5MiniPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-nano(?:$|-)`),
pricing: gpt5NanoPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2(?:$|-)`),
pricing: gpt52Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1(?:$|-)`),
pricing: gpt5Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-5(?:$|-)`),
pricing: gpt5Pricing,
},
{
pattern: regexp.MustCompile(`^o4-mini-deep-research(?:$|-)`),
pricing: o4MiniDeepResearchPricing,
},
{
pattern: regexp.MustCompile(`^o4-mini(?:$|-)`),
pricing: o4MiniPricing,
},
{
pattern: regexp.MustCompile(`^o3-pro(?:$|-)`),
pricing: o3ProPricing,
},
{
pattern: regexp.MustCompile(`^o3-deep-research(?:$|-)`),
pricing: o3DeepResearchPricing,
},
{
pattern: regexp.MustCompile(`^o3-mini(?:$|-)`),
pricing: o3MiniPricing,
},
{
pattern: regexp.MustCompile(`^o3(?:$|-)`),
pricing: o3Pricing,
},
{
pattern: regexp.MustCompile(`^o1-pro(?:$|-)`),
pricing: o1ProPricing,
},
{
pattern: regexp.MustCompile(`^o1-mini(?:$|-)`),
pricing: o1MiniPricing,
},
{
pattern: regexp.MustCompile(`^o1(?:$|-)`),
pricing: o1Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o-mini-audio(?:$|-)`),
pricing: gpt4oMiniAudioPricing,
},
{
pattern: regexp.MustCompile(`^gpt-audio-mini(?:$|-)`),
pricing: gptAudioMiniPricing,
},
{
pattern: regexp.MustCompile(`^(?:gpt-4o-audio|gpt-audio)(?:$|-)`),
pricing: gpt4oAudioPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1-nano(?:$|-)`),
pattern: regexp.MustCompile(`^gpt-4\.1-nano`),
pricing: gpt41NanoPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1-mini(?:$|-)`),
pattern: regexp.MustCompile(`^gpt-4\.1-mini`),
pricing: gpt41MiniPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1(?:$|-)`),
pattern: regexp.MustCompile(`^gpt-4\.1`),
pricing: gpt41Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o-mini(?:$|-)`),
pattern: regexp.MustCompile(`^o4-mini`),
pricing: o4MiniPricing,
},
{
pattern: regexp.MustCompile(`^o3-mini`),
pricing: o3MiniPricing,
},
{
pattern: regexp.MustCompile(`^o3`),
pricing: o3Pricing,
},
{
pattern: regexp.MustCompile(`^o1-mini`),
pricing: o1MiniPricing,
},
{
pattern: regexp.MustCompile(`^o1`),
pricing: o1Pricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o-audio`),
pricing: gpt4oAudioPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o-mini`),
pricing: gpt4oMiniPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o(?:$|-)`),
pattern: regexp.MustCompile(`^gpt-4o`),
pricing: gpt4oPricing,
},
{
pattern: regexp.MustCompile(`^chatgpt-4o(?:$|-)`),
pattern: regexp.MustCompile(`^chatgpt-4o`),
pricing: gpt4oPricing,
},
}
flexModelFamilies = []modelFamily{
{
pattern: regexp.MustCompile(`^gpt-5-mini(?:$|-)`),
pricing: gpt5MiniFlexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-nano(?:$|-)`),
pricing: gpt5NanoFlexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2(?:$|-)`),
pricing: gpt52FlexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1(?:$|-)`),
pricing: gpt5FlexPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5(?:$|-)`),
pricing: gpt5FlexPricing,
},
{
pattern: regexp.MustCompile(`^o4-mini(?:$|-)`),
pricing: o4MiniFlexPricing,
},
{
pattern: regexp.MustCompile(`^o3(?:$|-)`),
pricing: o3FlexPricing,
},
}
priorityModelFamilies = []modelFamily{
{
pattern: regexp.MustCompile(`^gpt-5\.3-codex(?:$|-)`),
pricing: gpt52CodexPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2-codex(?:$|-)`),
pricing: gpt52CodexPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-codex-max(?:$|-)`),
pricing: gpt51CodexPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1-codex(?:$|-)`),
pricing: gpt51CodexPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-codex-mini(?:$|-)`),
pricing: gpt5MiniPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-codex(?:$|-)`),
pricing: gpt51CodexPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5-mini(?:$|-)`),
pricing: gpt5MiniPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.2(?:$|-)`),
pricing: gpt52PriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5\.1(?:$|-)`),
pricing: gpt5PriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-5(?:$|-)`),
pricing: gpt5PriorityPricing,
},
{
pattern: regexp.MustCompile(`^o4-mini(?:$|-)`),
pricing: o4MiniPriorityPricing,
},
{
pattern: regexp.MustCompile(`^o3(?:$|-)`),
pricing: o3PriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1-nano(?:$|-)`),
pricing: gpt41NanoPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1-mini(?:$|-)`),
pricing: gpt41MiniPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4\.1(?:$|-)`),
pricing: gpt41PriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o-mini(?:$|-)`),
pricing: gpt4oMiniPriorityPricing,
},
{
pattern: regexp.MustCompile(`^gpt-4o(?:$|-)`),
pricing: gpt4oPriorityPricing,
},
}
)
func modelFamiliesForTier(serviceTier string) []modelFamily {
switch serviceTier {
case serviceTierFlex:
return flexModelFamilies
case serviceTierPriority:
return priorityModelFamilies
default:
return standardModelFamilies
}
}
func findPricingInFamilies(model string, modelFamilies []modelFamily) (ModelPricing, bool) {
func getPricing(model string) ModelPricing {
for _, family := range modelFamilies {
if family.pattern.MatchString(model) {
return family.pricing, true
return family.pricing
}
}
return ModelPricing{}, false
}
func normalizeServiceTier(serviceTier string) string {
switch strings.ToLower(strings.TrimSpace(serviceTier)) {
case "", serviceTierAuto, serviceTierDefault:
return serviceTierDefault
case serviceTierFlex:
return serviceTierFlex
case serviceTierPriority:
return serviceTierPriority
case serviceTierScale:
// Scale-tier requests are prepaid differently and not listed in this usage file.
return serviceTierDefault
default:
return serviceTierDefault
}
}
func getPricing(model string, serviceTier string) ModelPricing {
normalizedServiceTier := normalizeServiceTier(serviceTier)
modelFamilies := modelFamiliesForTier(normalizedServiceTier)
if pricing, found := findPricingInFamilies(model, modelFamilies); found {
return pricing
}
normalizedModel := normalizeGPT5Model(model)
if normalizedModel != model {
if pricing, found := findPricingInFamilies(normalizedModel, modelFamilies); found {
return pricing
}
}
if normalizedServiceTier != serviceTierDefault {
if pricing, found := findPricingInFamilies(model, standardModelFamilies); found {
return pricing
}
if normalizedModel != model {
if pricing, found := findPricingInFamilies(normalizedModel, standardModelFamilies); found {
return pricing
}
}
}
return gpt4oPricing
}
func normalizeGPT5Model(model string) string {
if !strings.HasPrefix(model, "gpt-5.") {
return model
}
switch {
case strings.Contains(model, "-codex-mini"):
return "gpt-5.1-codex-mini"
case strings.Contains(model, "-codex-max"):
return "gpt-5.1-codex-max"
case strings.Contains(model, "-codex"):
return "gpt-5.3-codex"
case strings.Contains(model, "-chat-latest"):
return "gpt-5.2-chat-latest"
case strings.Contains(model, "-pro"):
return "gpt-5.2-pro"
case strings.Contains(model, "-mini"):
return "gpt-5-mini"
case strings.Contains(model, "-nano"):
return "gpt-5-nano"
default:
return "gpt-5.2"
}
}
func calculateCost(stats UsageStats, model string, serviceTier string) float64 {
pricing := getPricing(model, serviceTier)
func calculateCost(stats UsageStats, model string) float64 {
pricing := getPricing(model)
regularInputTokens := stats.InputTokens - stats.CachedTokens
if regularInputTokens < 0 {
@@ -731,89 +238,41 @@ func calculateCost(stats UsageStats, model string, serviceTier string) float64 {
return math.Round(cost*100) / 100
}
func roundCost(cost float64) float64 {
return math.Round(cost*100) / 100
}
func (u *AggregatedUsage) ToJSON() *AggregatedUsageJSON {
u.mutex.Lock()
defer u.mutex.Unlock()
func normalizeCombinations(combinations []CostCombination) {
for index := range combinations {
combinations[index].ServiceTier = normalizeServiceTier(combinations[index].ServiceTier)
if combinations[index].ByUser == nil {
combinations[index].ByUser = make(map[string]UsageStats)
}
}
}
func addUsageToCombinations(combinations *[]CostCombination, model string, serviceTier string, weekStartUnix int64, user string, inputTokens, outputTokens, cachedTokens int64) {
var matchedCombination *CostCombination
for index := range *combinations {
combination := &(*combinations)[index]
combinationServiceTier := normalizeServiceTier(combination.ServiceTier)
if combination.ServiceTier != combinationServiceTier {
combination.ServiceTier = combinationServiceTier
}
if combination.Model == model && combinationServiceTier == serviceTier && combination.WeekStartUnix == weekStartUnix {
matchedCombination = combination
break
}
result := &AggregatedUsageJSON{
LastUpdated: u.LastUpdated,
Combinations: make([]CostCombinationJSON, len(u.Combinations)),
Costs: CostsSummaryJSON{
TotalUSD: 0,
ByUser: make(map[string]float64),
},
}
if matchedCombination == nil {
newCombination := CostCombination{
Model: model,
ServiceTier: serviceTier,
WeekStartUnix: weekStartUnix,
Total: UsageStats{},
ByUser: make(map[string]UsageStats),
}
*combinations = append(*combinations, newCombination)
matchedCombination = &(*combinations)[len(*combinations)-1]
}
for i, combo := range u.Combinations {
totalCost := calculateCost(combo.Total, combo.Model)
matchedCombination.Total.RequestCount++
matchedCombination.Total.InputTokens += inputTokens
matchedCombination.Total.OutputTokens += outputTokens
matchedCombination.Total.CachedTokens += cachedTokens
result.Costs.TotalUSD += totalCost
if user != "" {
userStats := matchedCombination.ByUser[user]
userStats.RequestCount++
userStats.InputTokens += inputTokens
userStats.OutputTokens += outputTokens
userStats.CachedTokens += cachedTokens
matchedCombination.ByUser[user] = userStats
}
}
func buildCombinationJSON(combinations []CostCombination, aggregateUserCosts map[string]float64) ([]CostCombinationJSON, float64) {
result := make([]CostCombinationJSON, len(combinations))
var totalCost float64
for index, combination := range combinations {
combinationTotalCost := calculateCost(combination.Total, combination.Model, combination.ServiceTier)
totalCost += combinationTotalCost
combinationJSON := CostCombinationJSON{
Model: combination.Model,
ServiceTier: combination.ServiceTier,
WeekStartUnix: combination.WeekStartUnix,
comboJSON := CostCombinationJSON{
Model: combo.Model,
Total: UsageStatsJSON{
RequestCount: combination.Total.RequestCount,
InputTokens: combination.Total.InputTokens,
OutputTokens: combination.Total.OutputTokens,
CachedTokens: combination.Total.CachedTokens,
CostUSD: combinationTotalCost,
RequestCount: combo.Total.RequestCount,
InputTokens: combo.Total.InputTokens,
OutputTokens: combo.Total.OutputTokens,
CachedTokens: combo.Total.CachedTokens,
CostUSD: totalCost,
},
ByUser: make(map[string]UsageStatsJSON),
}
for user, userStats := range combination.ByUser {
userCost := calculateCost(userStats, combination.Model, combination.ServiceTier)
if aggregateUserCosts != nil {
aggregateUserCosts[user] += userCost
}
for user, userStats := range combo.ByUser {
userCost := calculateCost(userStats, combo.Model)
result.Costs.ByUser[user] += userCost
combinationJSON.ByUser[user] = UsageStatsJSON{
comboJSON.ByUser[user] = UsageStatsJSON{
RequestCount: userStats.RequestCount,
InputTokens: userStats.InputTokens,
OutputTokens: userStats.OutputTokens,
@@ -822,80 +281,12 @@ func buildCombinationJSON(combinations []CostCombination, aggregateUserCosts map
}
}
result[index] = combinationJSON
}
return result, roundCost(totalCost)
}
func formatUTCOffsetLabel(timestamp time.Time) string {
_, offsetSeconds := timestamp.Zone()
sign := "+"
if offsetSeconds < 0 {
sign = "-"
offsetSeconds = -offsetSeconds
}
offsetHours := offsetSeconds / 3600
offsetMinutes := (offsetSeconds % 3600) / 60
if offsetMinutes == 0 {
return fmt.Sprintf("UTC%s%d", sign, offsetHours)
}
return fmt.Sprintf("UTC%s%d:%02d", sign, offsetHours, offsetMinutes)
}
func formatWeekStartKey(cycleStartAt time.Time) string {
localCycleStart := cycleStartAt.In(time.Local)
return fmt.Sprintf("%s %s", localCycleStart.Format("2006-01-02 15:04:05"), formatUTCOffsetLabel(localCycleStart))
}
func buildByWeekCost(combinations []CostCombination) map[string]float64 {
byWeek := make(map[string]float64)
for _, combination := range combinations {
if combination.WeekStartUnix <= 0 {
continue
}
weekStartAt := time.Unix(combination.WeekStartUnix, 0).UTC()
weekKey := formatWeekStartKey(weekStartAt)
byWeek[weekKey] += calculateCost(combination.Total, combination.Model, combination.ServiceTier)
}
for weekKey, weekCost := range byWeek {
byWeek[weekKey] = roundCost(weekCost)
}
return byWeek
}
func deriveWeekStartUnix(cycleHint *WeeklyCycleHint) int64 {
if cycleHint == nil || cycleHint.WindowMinutes <= 0 || cycleHint.ResetAt.IsZero() {
return 0
}
windowDuration := time.Duration(cycleHint.WindowMinutes) * time.Minute
return cycleHint.ResetAt.UTC().Add(-windowDuration).Unix()
}
func (u *AggregatedUsage) ToJSON() *AggregatedUsageJSON {
u.mutex.Lock()
defer u.mutex.Unlock()
result := &AggregatedUsageJSON{
LastUpdated: u.LastUpdated,
Costs: CostsSummaryJSON{
TotalUSD: 0,
ByUser: make(map[string]float64),
ByWeek: make(map[string]float64),
},
}
globalCombinationsJSON, totalCost := buildCombinationJSON(u.Combinations, result.Costs.ByUser)
result.Combinations = globalCombinationsJSON
result.Costs.TotalUSD = totalCost
result.Costs.ByWeek = buildByWeekCost(u.Combinations)
if len(result.Costs.ByWeek) == 0 {
result.Costs.ByWeek = nil
result.Combinations[i] = comboJSON
}
result.Costs.TotalUSD = math.Round(result.Costs.TotalUSD*100) / 100
for user, cost := range result.Costs.ByUser {
result.Costs.ByUser[user] = roundCost(cost)
result.Costs.ByUser[user] = math.Round(cost*100) / 100
}
return result
@@ -905,9 +296,6 @@ func (u *AggregatedUsage) Load() error {
u.mutex.Lock()
defer u.mutex.Unlock()
u.LastUpdated = time.Time{}
u.Combinations = nil
data, err := os.ReadFile(u.filePath)
if err != nil {
if os.IsNotExist(err) {
@@ -928,7 +316,12 @@ func (u *AggregatedUsage) Load() error {
u.LastUpdated = temp.LastUpdated
u.Combinations = temp.Combinations
normalizeCombinations(u.Combinations)
for i := range u.Combinations {
if u.Combinations[i].ByUser == nil {
u.Combinations[i].ByUser = make(map[string]UsageStats)
}
}
return nil
}
@@ -956,27 +349,47 @@ func (u *AggregatedUsage) Save() error {
return err
}
func (u *AggregatedUsage) AddUsage(model string, inputTokens, outputTokens, cachedTokens int64, serviceTier string, user string) error {
return u.AddUsageWithCycleHint(model, inputTokens, outputTokens, cachedTokens, serviceTier, user, time.Now(), nil)
}
func (u *AggregatedUsage) AddUsageWithCycleHint(model string, inputTokens, outputTokens, cachedTokens int64, serviceTier string, user string, observedAt time.Time, cycleHint *WeeklyCycleHint) error {
func (u *AggregatedUsage) AddUsage(model string, inputTokens, outputTokens, cachedTokens int64, user string) error {
if model == "" {
return E.New("model cannot be empty")
}
normalizedServiceTier := normalizeServiceTier(serviceTier)
if observedAt.IsZero() {
observedAt = time.Now()
}
u.mutex.Lock()
defer u.mutex.Unlock()
u.LastUpdated = observedAt
weekStartUnix := deriveWeekStartUnix(cycleHint)
u.LastUpdated = time.Now()
addUsageToCombinations(&u.Combinations, model, normalizedServiceTier, weekStartUnix, user, inputTokens, outputTokens, cachedTokens)
var combo *CostCombination
for i := range u.Combinations {
if u.Combinations[i].Model == model {
combo = &u.Combinations[i]
break
}
}
if combo == nil {
newCombo := CostCombination{
Model: model,
Total: UsageStats{},
ByUser: make(map[string]UsageStats),
}
u.Combinations = append(u.Combinations, newCombo)
combo = &u.Combinations[len(u.Combinations)-1]
}
combo.Total.RequestCount++
combo.Total.InputTokens += inputTokens
combo.Total.OutputTokens += outputTokens
combo.Total.CachedTokens += cachedTokens
if user != "" {
userStats := combo.ByUser[user]
userStats.RequestCount++
userStats.InputTokens += inputTokens
userStats.OutputTokens += outputTokens
userStats.CachedTokens += cachedTokens
combo.ByUser[user] = userStats
}
go u.scheduleSave()

View File

@@ -1,51 +0,0 @@
package oomkiller
import (
"time"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
)
func buildTimerConfig(options option.OOMKillerServiceOptions, memoryLimit uint64, useAvailable bool) (timerConfig, error) {
safetyMargin := uint64(defaultSafetyMargin)
if options.SafetyMargin != nil && options.SafetyMargin.Value() > 0 {
safetyMargin = options.SafetyMargin.Value()
}
minInterval := defaultMinInterval
if options.MinInterval != 0 {
minInterval = time.Duration(options.MinInterval.Build())
if minInterval <= 0 {
return timerConfig{}, E.New("min_interval must be greater than 0")
}
}
maxInterval := defaultMaxInterval
if options.MaxInterval != 0 {
maxInterval = time.Duration(options.MaxInterval.Build())
if maxInterval <= 0 {
return timerConfig{}, E.New("max_interval must be greater than 0")
}
}
if maxInterval < minInterval {
return timerConfig{}, E.New("max_interval must be greater than or equal to min_interval")
}
checksBeforeLimit := defaultChecksBeforeLimit
if options.ChecksBeforeLimit != 0 {
checksBeforeLimit = options.ChecksBeforeLimit
if checksBeforeLimit <= 0 {
return timerConfig{}, E.New("checks_before_limit must be greater than 0")
}
}
return timerConfig{
memoryLimit: memoryLimit,
safetyMargin: safetyMargin,
minInterval: minInterval,
maxInterval: maxInterval,
checksBeforeLimit: checksBeforeLimit,
useAvailable: useAvailable,
}, nil
}

View File

@@ -1,192 +0,0 @@
//go:build darwin && cgo
package oomkiller
/*
#include <dispatch/dispatch.h>
static dispatch_source_t memoryPressureSource;
extern void goMemoryPressureCallback(unsigned long status);
static void startMemoryPressureMonitor() {
memoryPressureSource = dispatch_source_create(
DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
0,
DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0)
);
dispatch_source_set_event_handler(memoryPressureSource, ^{
unsigned long status = dispatch_source_get_data(memoryPressureSource);
goMemoryPressureCallback(status);
});
dispatch_activate(memoryPressureSource);
}
static void stopMemoryPressureMonitor() {
if (memoryPressureSource) {
dispatch_source_cancel(memoryPressureSource);
memoryPressureSource = NULL;
}
}
*/
import "C"
import (
"context"
runtimeDebug "runtime/debug"
"sync"
"github.com/sagernet/sing-box/adapter"
boxService "github.com/sagernet/sing-box/adapter/service"
boxConstant "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
"github.com/sagernet/sing/common/memory"
"github.com/sagernet/sing/service"
)
func RegisterService(registry *boxService.Registry) {
boxService.Register[option.OOMKillerServiceOptions](registry, boxConstant.TypeOOMKiller, NewService)
}
var (
globalAccess sync.Mutex
globalServices []*Service
)
type Service struct {
boxService.Adapter
logger log.ContextLogger
router adapter.Router
memoryLimit uint64
hasTimerMode bool
useAvailable bool
timerConfig timerConfig
adaptiveTimer *adaptiveTimer
}
func NewService(ctx context.Context, logger log.ContextLogger, tag string, options option.OOMKillerServiceOptions) (adapter.Service, error) {
s := &Service{
Adapter: boxService.NewAdapter(boxConstant.TypeOOMKiller, tag),
logger: logger,
router: service.FromContext[adapter.Router](ctx),
}
if options.MemoryLimit != nil {
s.memoryLimit = options.MemoryLimit.Value()
if s.memoryLimit > 0 {
s.hasTimerMode = true
}
}
config, err := buildTimerConfig(options, s.memoryLimit, s.useAvailable)
if err != nil {
return nil, err
}
s.timerConfig = config
return s, nil
}
func (s *Service) Start(stage adapter.StartStage) error {
if stage != adapter.StartStateStart {
return nil
}
if s.hasTimerMode {
s.adaptiveTimer = newAdaptiveTimer(s.logger, s.router, s.timerConfig)
if s.memoryLimit > 0 {
s.logger.Info("started memory monitor with limit: ", s.memoryLimit/(1024*1024), " MiB")
} else {
s.logger.Info("started memory monitor with available memory detection")
}
} else {
s.logger.Info("started memory pressure monitor")
}
globalAccess.Lock()
isFirst := len(globalServices) == 0
globalServices = append(globalServices, s)
globalAccess.Unlock()
if isFirst {
C.startMemoryPressureMonitor()
}
return nil
}
func (s *Service) Close() error {
if s.adaptiveTimer != nil {
s.adaptiveTimer.stop()
}
globalAccess.Lock()
for i, svc := range globalServices {
if svc == s {
globalServices = append(globalServices[:i], globalServices[i+1:]...)
break
}
}
isLast := len(globalServices) == 0
globalAccess.Unlock()
if isLast {
C.stopMemoryPressureMonitor()
}
return nil
}
//export goMemoryPressureCallback
func goMemoryPressureCallback(status C.ulong) {
globalAccess.Lock()
services := make([]*Service, len(globalServices))
copy(services, globalServices)
globalAccess.Unlock()
if len(services) == 0 {
return
}
criticalFlag := C.ulong(C.DISPATCH_MEMORYPRESSURE_CRITICAL)
warnFlag := C.ulong(C.DISPATCH_MEMORYPRESSURE_WARN)
isCritical := status&criticalFlag != 0
isWarning := status&warnFlag != 0
var level string
switch {
case isCritical:
level = "critical"
case isWarning:
level = "warning"
default:
level = "normal"
}
var freeOSMemory bool
for _, s := range services {
usage := memory.Total()
if s.hasTimerMode {
if isCritical {
s.logger.Warn("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB")
if s.adaptiveTimer != nil {
s.adaptiveTimer.startNow()
}
} else if isWarning {
s.logger.Warn("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB")
} else {
s.logger.Debug("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB")
if s.adaptiveTimer != nil {
s.adaptiveTimer.stop()
}
}
} else {
if isCritical {
s.logger.Error("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB, resetting network")
s.router.ResetNetwork()
freeOSMemory = true
} else if isWarning {
s.logger.Warn("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB")
} else {
s.logger.Debug("memory pressure: ", level, ", usage: ", usage/(1024*1024), " MiB")
}
}
}
if freeOSMemory {
runtimeDebug.FreeOSMemory()
}
}

View File

@@ -1,81 +0,0 @@
//go:build !darwin || !cgo
package oomkiller
import (
"context"
"github.com/sagernet/sing-box/adapter"
boxService "github.com/sagernet/sing-box/adapter/service"
boxConstant "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/memory"
"github.com/sagernet/sing/service"
)
func RegisterService(registry *boxService.Registry) {
boxService.Register[option.OOMKillerServiceOptions](registry, boxConstant.TypeOOMKiller, NewService)
}
type Service struct {
boxService.Adapter
logger log.ContextLogger
router adapter.Router
adaptiveTimer *adaptiveTimer
timerConfig timerConfig
hasTimerMode bool
useAvailable bool
memoryLimit uint64
}
func NewService(ctx context.Context, logger log.ContextLogger, tag string, options option.OOMKillerServiceOptions) (adapter.Service, error) {
s := &Service{
Adapter: boxService.NewAdapter(boxConstant.TypeOOMKiller, tag),
logger: logger,
router: service.FromContext[adapter.Router](ctx),
}
if options.MemoryLimit != nil {
s.memoryLimit = options.MemoryLimit.Value()
}
if s.memoryLimit > 0 {
s.hasTimerMode = true
} else if memory.AvailableSupported() {
s.useAvailable = true
s.hasTimerMode = true
}
config, err := buildTimerConfig(options, s.memoryLimit, s.useAvailable)
if err != nil {
return nil, err
}
s.timerConfig = config
return s, nil
}
func (s *Service) Start(stage adapter.StartStage) error {
if stage != adapter.StartStateStart {
return nil
}
if !s.hasTimerMode {
return E.New("memory pressure monitoring is not available on this platform without memory_limit")
}
s.adaptiveTimer = newAdaptiveTimer(s.logger, s.router, s.timerConfig)
s.adaptiveTimer.start(0)
if s.useAvailable {
s.logger.Info("started memory monitor with available memory detection")
} else {
s.logger.Info("started memory monitor with limit: ", s.memoryLimit/(1024*1024), " MiB")
}
return nil
}
func (s *Service) Close() error {
if s.adaptiveTimer != nil {
s.adaptiveTimer.stop()
}
return nil
}

View File

@@ -1,158 +0,0 @@
package oomkiller
import (
runtimeDebug "runtime/debug"
"sync"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing/common/memory"
)
const (
defaultChecksBeforeLimit = 4
defaultMinInterval = 500 * time.Millisecond
defaultMaxInterval = 10 * time.Second
defaultSafetyMargin = 5 * 1024 * 1024
)
type adaptiveTimer struct {
logger log.ContextLogger
router adapter.Router
memoryLimit uint64
safetyMargin uint64
minInterval time.Duration
maxInterval time.Duration
checksBeforeLimit int
useAvailable bool
access sync.Mutex
timer *time.Timer
previousUsage uint64
lastInterval time.Duration
}
type timerConfig struct {
memoryLimit uint64
safetyMargin uint64
minInterval time.Duration
maxInterval time.Duration
checksBeforeLimit int
useAvailable bool
}
func newAdaptiveTimer(logger log.ContextLogger, router adapter.Router, config timerConfig) *adaptiveTimer {
return &adaptiveTimer{
logger: logger,
router: router,
memoryLimit: config.memoryLimit,
safetyMargin: config.safetyMargin,
minInterval: config.minInterval,
maxInterval: config.maxInterval,
checksBeforeLimit: config.checksBeforeLimit,
useAvailable: config.useAvailable,
}
}
func (t *adaptiveTimer) start(_ uint64) {
t.access.Lock()
defer t.access.Unlock()
t.startLocked()
}
func (t *adaptiveTimer) startNow() {
t.access.Lock()
t.startLocked()
t.access.Unlock()
t.poll()
}
func (t *adaptiveTimer) startLocked() {
if t.timer != nil {
return
}
t.previousUsage = memory.Total()
t.lastInterval = t.minInterval
t.timer = time.AfterFunc(t.minInterval, t.poll)
}
func (t *adaptiveTimer) stop() {
t.access.Lock()
defer t.access.Unlock()
t.stopLocked()
}
func (t *adaptiveTimer) stopLocked() {
if t.timer != nil {
t.timer.Stop()
t.timer = nil
}
}
func (t *adaptiveTimer) running() bool {
t.access.Lock()
defer t.access.Unlock()
return t.timer != nil
}
func (t *adaptiveTimer) poll() {
t.access.Lock()
defer t.access.Unlock()
if t.timer == nil {
return
}
usage := memory.Total()
delta := int64(usage) - int64(t.previousUsage)
t.previousUsage = usage
var remaining uint64
var triggered bool
if t.memoryLimit > 0 {
if usage >= t.memoryLimit {
remaining = 0
triggered = true
} else {
remaining = t.memoryLimit - usage
}
} else if t.useAvailable {
available := memory.Available()
if available <= t.safetyMargin {
remaining = 0
triggered = true
} else {
remaining = available - t.safetyMargin
}
} else {
remaining = 0
}
if triggered {
t.logger.Error("memory threshold reached, usage: ", usage/(1024*1024), " MiB, resetting network")
t.router.ResetNetwork()
runtimeDebug.FreeOSMemory()
}
var interval time.Duration
if triggered {
interval = t.maxInterval
} else if delta <= 0 {
interval = t.maxInterval
} else if t.checksBeforeLimit <= 0 {
interval = t.maxInterval
} else {
timeToLimit := time.Duration(float64(remaining) / float64(delta) * float64(t.lastInterval))
interval = timeToLimit / time.Duration(t.checksBeforeLimit)
if interval < t.minInterval {
interval = t.minInterval
}
if interval > t.maxInterval {
interval = t.maxInterval
}
}
t.lastInterval = interval
t.timer.Reset(interval)
}

Some files were not shown because too many files have changed in this diff Show More