Compare commits
3 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
22e13c9c0d | ||
|
|
4e66d187a7 | ||
|
|
94c1c81ee0 |
95
.github/workflows/release.yml
vendored
Normal file
95
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ matrix.os }}-${{ matrix.arch }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- os: linux
|
||||
arch: amd64
|
||||
- os: linux
|
||||
arch: arm64
|
||||
- os: darwin
|
||||
arch: amd64
|
||||
- os: darwin
|
||||
arch: arm64
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build binary
|
||||
env:
|
||||
GOOS: ${{ matrix.os }}
|
||||
GOARCH: ${{ matrix.arch }}
|
||||
CGO_ENABLED: 0
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BINARY_NAME=go-alived-${{ matrix.os }}-${{ matrix.arch }}
|
||||
if [ "${{ matrix.os }}" = "windows" ]; then
|
||||
BINARY_NAME="${BINARY_NAME}.exe"
|
||||
fi
|
||||
go build -ldflags="-s -w -X github.com/loveuer/go-alived/internal/cmd.Version=${{ steps.version.outputs.VERSION }}" \
|
||||
-o dist/${BINARY_NAME} .
|
||||
|
||||
# Create checksum
|
||||
cd dist && sha256sum ${BINARY_NAME} > ${BINARY_NAME}.sha256
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: go-alived-${{ matrix.os }}-${{ matrix.arch }}
|
||||
path: dist/
|
||||
|
||||
release:
|
||||
name: Create Release
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Prepare release files
|
||||
run: |
|
||||
mkdir -p release
|
||||
find artifacts -type f -exec cp {} release/ \;
|
||||
ls -la release/
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
name: Release ${{ steps.version.outputs.VERSION }}
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
generate_release_notes: true
|
||||
files: release/*
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
373
README.md
373
README.md
@@ -1,40 +1,35 @@
|
||||
# go-alived
|
||||
|
||||
A lightweight, dependency-free VRRP (Virtual Router Redundancy Protocol) implementation in Go, designed as a simple alternative to keepalived.
|
||||
A lightweight VRRP (Virtual Router Redundancy Protocol) implementation in Go, designed as a simple alternative to keepalived.
|
||||
|
||||
## Features
|
||||
|
||||
✅ **Phase 1: Core VRRP Functionality (Completed)**
|
||||
- VRRP protocol implementation (RFC 3768/5798)
|
||||
- Virtual IP management (add/remove VIPs)
|
||||
- State machine (INIT/BACKUP/MASTER/FAULT)
|
||||
- Priority-based master election
|
||||
- Gratuitous ARP for network updates
|
||||
- Raw socket VRRP packet send/receive
|
||||
- Timer management (advertisement & master-down timers)
|
||||
- VRRP instance manager with multi-instance support
|
||||
- Configuration hot-reload (SIGHUP)
|
||||
|
||||
✅ **Phase 2: Health Checking (Completed)**
|
||||
- Health checker interface with rise/fall logic
|
||||
- TCP health checks
|
||||
- HTTP/HTTPS health checks
|
||||
- ICMP ping checks
|
||||
- Script-based checks (custom commands)
|
||||
- Periodic health check scheduling
|
||||
- Health check integration with VRRP priority
|
||||
- Track scripts: automatic priority adjustment on health changes
|
||||
|
||||
🚧 **Phase 3: Enhanced Features (Planned)**
|
||||
- State transition scripts (notify_master/backup/fault)
|
||||
- Email/Webhook notifications
|
||||
- Sync groups
|
||||
- Virtual MAC support
|
||||
- Metrics export
|
||||
- **VRRP Protocol**: RFC 3768/5798 compliant implementation
|
||||
- **High Availability**: Automatic failover with priority-based master election
|
||||
- **Health Checking**: TCP, HTTP/HTTPS, ICMP ping, and script-based checks
|
||||
- **Easy Deployment**: Built-in install command with systemd/init.d support
|
||||
- **Hot Reload**: Configuration reload via SIGHUP without service restart
|
||||
- **Zero Dependencies**: Single static binary, no runtime dependencies
|
||||
|
||||
## Installation
|
||||
|
||||
### Build from source
|
||||
### Download Binary
|
||||
|
||||
Download the latest release from [GitHub Releases](https://github.com/loveuer/go-alived/releases):
|
||||
|
||||
```bash
|
||||
# Linux amd64
|
||||
curl -LO https://github.com/loveuer/go-alived/releases/latest/download/go-alived-linux-amd64
|
||||
chmod +x go-alived-linux-amd64
|
||||
sudo mv go-alived-linux-amd64 /usr/local/bin/go-alived
|
||||
|
||||
# Linux arm64
|
||||
curl -LO https://github.com/loveuer/go-alived/releases/latest/download/go-alived-linux-arm64
|
||||
chmod +x go-alived-linux-arm64
|
||||
sudo mv go-alived-linux-arm64 /usr/local/bin/go-alived
|
||||
```
|
||||
|
||||
### Build from Source
|
||||
|
||||
```bash
|
||||
git clone https://github.com/loveuer/go-alived.git
|
||||
@@ -42,190 +37,246 @@ cd go-alived
|
||||
go build -o go-alived .
|
||||
```
|
||||
|
||||
### Quick Install (Recommended)
|
||||
|
||||
```bash
|
||||
# Install as systemd service (default)
|
||||
sudo ./go-alived install
|
||||
|
||||
# Install as init.d service (for OpenWrt/older systems)
|
||||
sudo ./go-alived install --method service
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Test Your Environment
|
||||
|
||||
Before deployment, test if your environment supports VRRP:
|
||||
### 1. Test Environment
|
||||
|
||||
```bash
|
||||
# Basic test (auto-detect network interface)
|
||||
sudo ./go-alived test
|
||||
# Check if your environment supports VRRP
|
||||
sudo go-alived test
|
||||
|
||||
# Test specific interface
|
||||
sudo ./go-alived test -i eth0
|
||||
|
||||
# Full test with VIP
|
||||
sudo ./go-alived test -i eth0 -v 192.168.1.100/24
|
||||
# Test with specific interface
|
||||
sudo go-alived test -i eth0
|
||||
```
|
||||
|
||||
### 2. Run the Service
|
||||
### 2. Configure
|
||||
|
||||
Edit `/etc/go-alived/config.yaml`:
|
||||
|
||||
```yaml
|
||||
global:
|
||||
router_id: "node1"
|
||||
|
||||
vrrp_instances:
|
||||
- name: "VI_1"
|
||||
interface: "eth0" # Network interface
|
||||
state: "BACKUP" # Initial state
|
||||
virtual_router_id: 51 # VRID (1-255, must match on all nodes)
|
||||
priority: 100 # Higher = more likely to be master
|
||||
advert_interval: 1 # Advertisement interval in seconds
|
||||
auth_type: "PASS" # Authentication type
|
||||
auth_pass: "secret" # Password (max 8 chars)
|
||||
virtual_ips:
|
||||
- "192.168.1.100/24" # Virtual IP address(es)
|
||||
```
|
||||
|
||||
### 3. Start Service
|
||||
|
||||
```bash
|
||||
# Run with minimal config
|
||||
sudo ./go-alived run -c config.mini.yaml -d
|
||||
|
||||
# Run with full config
|
||||
sudo ./go-alived -c config.yaml
|
||||
|
||||
# Install as systemd service
|
||||
sudo ./deployment/install.sh
|
||||
# Systemd
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable go-alived
|
||||
sudo systemctl start go-alived
|
||||
|
||||
# Init.d
|
||||
sudo /etc/init.d/go-alived start
|
||||
```
|
||||
|
||||
## Usage
|
||||
### 4. Verify
|
||||
|
||||
### Commands
|
||||
```bash
|
||||
# Check service status
|
||||
sudo systemctl status go-alived
|
||||
|
||||
# Check VIP
|
||||
ip addr show eth0 | grep 192.168.1.100
|
||||
|
||||
# View logs
|
||||
sudo journalctl -u go-alived -f
|
||||
```
|
||||
go-alived # Run VRRP service (default)
|
||||
go-alived run # Run VRRP service
|
||||
go-alived test # Test environment for VRRP support
|
||||
go-alived --help # Show help
|
||||
go-alived --version # Show version
|
||||
```
|
||||
|
||||
### Global Flags
|
||||
|
||||
```
|
||||
-c, --config string Path to configuration file (default "/etc/go-alived/config.yaml")
|
||||
-d, --debug Enable debug mode
|
||||
-h, --help Show help
|
||||
-v, --version Show version
|
||||
```
|
||||
|
||||
### Test Command Flags
|
||||
|
||||
```
|
||||
-i, --interface string Network interface to test (auto-detect if not specified)
|
||||
-v, --vip string Test VIP address (e.g., 192.168.1.100/24)
|
||||
```
|
||||
|
||||
See [USAGE.md](USAGE.md) for detailed usage documentation.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Minimal Configuration
|
||||
### Two-Node HA Setup Example
|
||||
|
||||
**Node 1 (Primary)**:
|
||||
```yaml
|
||||
# config.mini.yaml - VRRP only
|
||||
global:
|
||||
router_id: "node1"
|
||||
|
||||
vrrp_instances:
|
||||
- name: "VI_1"
|
||||
interface: "eth0"
|
||||
state: "BACKUP"
|
||||
state: "MASTER"
|
||||
virtual_router_id: 51
|
||||
priority: 100
|
||||
priority: 100 # Higher priority
|
||||
advert_interval: 1
|
||||
auth_type: "PASS"
|
||||
auth_pass: "secret123"
|
||||
auth_pass: "secret"
|
||||
virtual_ips:
|
||||
- "192.168.1.100/24"
|
||||
```
|
||||
|
||||
### Full Configuration Example
|
||||
**Node 2 (Backup)**:
|
||||
```yaml
|
||||
global:
|
||||
router_id: "node2"
|
||||
|
||||
See `config.example.yaml` for complete configuration with health checking.
|
||||
vrrp_instances:
|
||||
- name: "VI_1"
|
||||
interface: "eth0"
|
||||
state: "BACKUP"
|
||||
virtual_router_id: 51
|
||||
priority: 90 # Lower priority
|
||||
advert_interval: 1
|
||||
auth_type: "PASS"
|
||||
auth_pass: "secret" # Must match
|
||||
virtual_ips:
|
||||
- "192.168.1.100/24" # Must match
|
||||
```
|
||||
|
||||
### Signals
|
||||
### Health Checking
|
||||
|
||||
- `SIGHUP`: Reload configuration
|
||||
- `SIGINT/SIGTERM`: Graceful shutdown
|
||||
```yaml
|
||||
vrrp_instances:
|
||||
- name: "VI_1"
|
||||
# ... other settings ...
|
||||
track_scripts:
|
||||
- "check_nginx" # Reference to health checker
|
||||
|
||||
## Architecture
|
||||
health_checkers:
|
||||
- name: "check_nginx"
|
||||
type: "tcp"
|
||||
interval: 3s
|
||||
timeout: 2s
|
||||
rise: 3 # Successes to mark healthy
|
||||
fall: 2 # Failures to mark unhealthy
|
||||
config:
|
||||
host: "127.0.0.1"
|
||||
port: 80
|
||||
```
|
||||
|
||||
**Supported Health Check Types**:
|
||||
|
||||
| Type | Description | Config |
|
||||
|------|-------------|--------|
|
||||
| `tcp` | TCP port check | `host`, `port` |
|
||||
| `http` | HTTP endpoint check | `url`, `method`, `expected_status` |
|
||||
| `ping` | ICMP ping check | `host`, `count` |
|
||||
| `script` | Custom script | `script`, `args` |
|
||||
|
||||
## Commands
|
||||
|
||||
```
|
||||
go-alived/
|
||||
├── main.go # Application entry point
|
||||
├── internal/
|
||||
│ ├── cmd/ # Cobra commands
|
||||
│ │ ├── root.go # Root command
|
||||
│ │ ├── run.go # Run service command
|
||||
│ │ └── test.go # Environment test command
|
||||
│ ├── vrrp/ # VRRP implementation
|
||||
│ │ ├── packet.go # VRRP packet structure & marshaling
|
||||
│ │ ├── socket.go # Raw socket operations
|
||||
│ │ ├── state.go # State machine & timers
|
||||
│ │ ├── arp.go # Gratuitous ARP
|
||||
│ │ ├── instance.go # VRRP instance logic
|
||||
│ │ └── manager.go # Instance manager
|
||||
│ └── health/ # Health check system
|
||||
│ ├── checker.go # Checker interface & state
|
||||
│ ├── monitor.go # Health check scheduler
|
||||
│ ├── tcp.go # TCP health checker
|
||||
│ ├── http.go # HTTP/HTTPS health checker
|
||||
│ ├── ping.go # ICMP ping checker
|
||||
│ ├── script.go # Script checker
|
||||
│ └── factory.go # Checker factory
|
||||
├── pkg/
|
||||
│ ├── config/ # Configuration loading & validation
|
||||
│ ├── logger/ # Logging system
|
||||
│ └── netif/ # Network interface management
|
||||
└── deployment/ # Deployment files
|
||||
├── go-alived.service # Systemd service file
|
||||
├── install.sh # Installation script
|
||||
├── uninstall.sh # Uninstallation script
|
||||
├── check-env.sh # Environment check script
|
||||
├── README.md # Deployment documentation
|
||||
└── COMPATIBILITY.md # Environment compatibility guide
|
||||
go-alived [command]
|
||||
|
||||
Available Commands:
|
||||
run Run the VRRP service
|
||||
test Test environment for VRRP support
|
||||
install Install go-alived as a system service (alias: i)
|
||||
help Help about any command
|
||||
|
||||
Flags:
|
||||
-h, --help help for go-alived
|
||||
-v, --version version for go-alived
|
||||
```
|
||||
|
||||
### run
|
||||
|
||||
```bash
|
||||
go-alived run [flags]
|
||||
|
||||
Flags:
|
||||
-c, --config string Path to config file (default "/etc/go-alived/config.yaml")
|
||||
-d, --debug Enable debug mode
|
||||
```
|
||||
|
||||
### test
|
||||
|
||||
```bash
|
||||
go-alived test [flags]
|
||||
|
||||
Flags:
|
||||
-i, --interface string Network interface to test
|
||||
-v, --vip string Test VIP address (e.g., 192.168.1.100/24)
|
||||
```
|
||||
|
||||
### install
|
||||
|
||||
```bash
|
||||
go-alived install [flags]
|
||||
|
||||
Flags:
|
||||
-m, --method string Installation method: systemd, service (default "systemd")
|
||||
|
||||
Aliases:
|
||||
install, i
|
||||
```
|
||||
|
||||
## Signals
|
||||
|
||||
| Signal | Action |
|
||||
|--------|--------|
|
||||
| `SIGHUP` | Reload configuration |
|
||||
| `SIGINT` / `SIGTERM` | Graceful shutdown |
|
||||
|
||||
```bash
|
||||
# Reload configuration
|
||||
sudo kill -HUP $(pgrep go-alived)
|
||||
```
|
||||
|
||||
## Environment Compatibility
|
||||
|
||||
### ✅ Fully Supported
|
||||
- Physical servers
|
||||
- KVM/QEMU virtual machines
|
||||
- Proxmox VE
|
||||
- VMware ESXi (with promiscuous mode)
|
||||
- VirtualBox (with bridged network + promiscuous mode)
|
||||
| Environment | Support | Notes |
|
||||
|-------------|---------|-------|
|
||||
| Physical servers | Full | |
|
||||
| KVM/QEMU/Proxmox | Full | |
|
||||
| VMware ESXi | Full | Enable promiscuous mode |
|
||||
| VirtualBox | Full | Bridged network + promiscuous mode |
|
||||
| Docker | Limited | Requires `--privileged --net=host` |
|
||||
| OpenWrt/iStoreOS | Full | Use `--method service` for install |
|
||||
| AWS/Aliyun/Azure | None | Multicast disabled |
|
||||
|
||||
### ⚠️ Limited Support
|
||||
- Private cloud (depends on network configuration)
|
||||
- Docker containers (requires `--privileged` and `--net=host`)
|
||||
- Kubernetes (requires hostNetwork mode)
|
||||
> **Note**: VRRP requires multicast support (224.0.0.18). Most public clouds disable multicast at the network layer. Use cloud-native HA solutions instead.
|
||||
|
||||
### ❌ Not Supported
|
||||
- AWS EC2 (multicast disabled)
|
||||
- Aliyun ECS (multicast disabled)
|
||||
- Azure VM (requires special configuration)
|
||||
- Google Cloud (multicast disabled by default)
|
||||
## Troubleshooting
|
||||
|
||||
**Why?** Public clouds typically block multicast traffic (including the VRRP group address 224.0.0.18) at the network virtualization layer.
|
||||
### Common Issues
|
||||
|
||||
**Alternative**: Use cloud-native solutions like Elastic IP (AWS), SLB/HaVip (Aliyun), Load Balancer (Azure/GCP).
|
||||
**1. "permission denied" or "operation not permitted"**
|
||||
```bash
|
||||
# VRRP requires root privileges
|
||||
sudo go-alived run -c /etc/go-alived/config.yaml
|
||||
```
|
||||
|
||||
See [deployment/COMPATIBILITY.md](deployment/COMPATIBILITY.md) for detailed compatibility information.
|
||||
**2. "authentication failed"**
|
||||
- Ensure `auth_pass` matches on all nodes
|
||||
- Password is limited to 8 characters
|
||||
|
||||
## Requirements
|
||||
**3. Both nodes become MASTER (split-brain)**
|
||||
- Check network connectivity between nodes
|
||||
- Verify `virtual_router_id` matches
|
||||
- Ensure multicast traffic is allowed
|
||||
|
||||
- Go 1.21+ (for building)
|
||||
- Linux/macOS with root privileges (for raw sockets and interface management)
|
||||
- Network interface with IPv4 address
|
||||
- Multicast support (for VRRP)
|
||||
**4. VIP not pingable after failover**
|
||||
- Gratuitous ARP may be blocked
|
||||
- Check switch/router ARP cache timeout
|
||||
|
||||
## Dependencies
|
||||
### Debug Mode
|
||||
|
||||
Minimal external dependencies:
|
||||
- `github.com/vishvananda/netlink` - Network interface management
|
||||
- `github.com/mdlayher/arp` - ARP packet handling
|
||||
- `github.com/spf13/cobra` - CLI framework
|
||||
- `golang.org/x/net/ipv4` - IPv4 raw socket support
|
||||
- `golang.org/x/net/icmp` - ICMP ping support
|
||||
- `gopkg.in/yaml.v3` - YAML configuration parsing
|
||||
|
||||
## Documentation
|
||||
|
||||
- [USAGE.md](USAGE.md) - Detailed usage guide
|
||||
- [TESTING.md](TESTING.md) - Testing guide
|
||||
- [deployment/README.md](deployment/README.md) - Deployment guide
|
||||
- [deployment/COMPATIBILITY.md](deployment/COMPATIBILITY.md) - Environment compatibility
|
||||
- [roadmap.md](roadmap.md) - Implementation roadmap
|
||||
|
||||
## Roadmap
|
||||
|
||||
See [roadmap.md](roadmap.md) for detailed implementation plan.
|
||||
```bash
|
||||
sudo go-alived run -c /etc/go-alived/config.yaml -d
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
|
||||
15
go.mod
15
go.mod
@@ -1,20 +1,23 @@
|
||||
module github.com/loveuer/go-alived
|
||||
|
||||
go 1.25.0
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
golang.org/x/net v0.47.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/native v1.0.0 // indirect
|
||||
github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 // indirect
|
||||
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect
|
||||
github.com/mdlayher/packet v1.0.0 // indirect
|
||||
github.com/mdlayher/socket v0.2.1 // indirect
|
||||
github.com/spf13/cobra v1.10.2 // indirect
|
||||
github.com/spf13/pflag v1.0.9 // indirect
|
||||
github.com/vishvananda/netlink v1.3.1 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
4
go.sum
4
go.sum
@@ -1,5 +1,6 @@
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
@@ -25,7 +26,6 @@ github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZla
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
@@ -35,12 +35,12 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
466
internal/cmd/install.go
Normal file
466
internal/cmd/install.go
Normal file
@@ -0,0 +1,466 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Filesystem locations used by the install command.
const (
	defaultBinaryPath  = "/usr/local/bin/go-alived"              // destination for the copied binary
	defaultConfigDir   = "/etc/go-alived"                        // configuration directory
	defaultConfigFile  = "/etc/go-alived/config.yaml"            // default configuration file
	systemdServicePath = "/etc/systemd/system/go-alived.service" // systemd unit file path
	initdScriptPath    = "/etc/init.d/go-alived"                 // SysV init script path
)

var (
	// installMethod holds the --method/-m flag value: "systemd" or "service".
	installMethod string
)

// installCmd installs the go-alived binary, a default configuration, and a
// service unit (systemd or init.d) onto the local system.
var installCmd = &cobra.Command{
	Use:     "install",
	Aliases: []string{"i"},
	Short:   "Install go-alived as a system service",
	Long: `Install go-alived binary and configuration files to system paths.

Supported installation methods:
  - systemd: Install as a systemd service (default, recommended for modern Linux)
  - service: Install as a SysV init.d service (for older Linux distributions)

Examples:
  sudo go-alived install
  sudo go-alived install --method systemd
  sudo go-alived i -m service`,
	Run: runInstall,
}

// init registers the install command and its flags on the root command.
func init() {
	rootCmd.AddCommand(installCmd)

	installCmd.Flags().StringVarP(&installMethod, "method", "m", "systemd",
		"installation method: systemd, service")
}
|
||||
|
||||
func runInstall(cmd *cobra.Command, args []string) {
|
||||
// Check root privileges
|
||||
if os.Geteuid() != 0 {
|
||||
fmt.Println("Error: This command requires root privileges")
|
||||
fmt.Println("Please run with: sudo go-alived install")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Validate method
|
||||
method := strings.ToLower(installMethod)
|
||||
if method != "systemd" && method != "service" {
|
||||
fmt.Printf("Error: Invalid installation method '%s'\n", installMethod)
|
||||
fmt.Println("Supported methods: systemd, service")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("=== Go-Alived Installation ===")
|
||||
fmt.Println()
|
||||
|
||||
const totalSteps = 3
|
||||
|
||||
// Step 1: Copy binary
|
||||
if err := installBinary(1, totalSteps); err != nil {
|
||||
fmt.Printf("Error installing binary: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Step 2: Create config directory and file
|
||||
configCreated, err := installConfig(2, totalSteps)
|
||||
if err != nil {
|
||||
fmt.Printf("Error installing config: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Step 3: Install service script
|
||||
if err := installServiceScript(3, totalSteps, method); err != nil {
|
||||
fmt.Printf("Error installing service script: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Print completion message
|
||||
printCompletionMessage(method, configCreated)
|
||||
}
|
||||
|
||||
func installBinary(step, total int) error {
|
||||
fmt.Printf("[%d/%d] Installing binary... ", step, total)
|
||||
|
||||
// Get current executable path
|
||||
execPath, err := os.Executable()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get executable path: %w", err)
|
||||
}
|
||||
|
||||
// Resolve symlinks
|
||||
execPath, err = filepath.EvalSymlinks(execPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve symlinks: %w", err)
|
||||
}
|
||||
|
||||
// Check if already installed at target path
|
||||
if execPath == defaultBinaryPath {
|
||||
fmt.Println("already installed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open source file
|
||||
src, err := os.Open(execPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source binary: %w", err)
|
||||
}
|
||||
defer src.Close()
|
||||
|
||||
// Create destination file
|
||||
dst, err := os.OpenFile(defaultBinaryPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create destination binary: %w", err)
|
||||
}
|
||||
defer dst.Close()
|
||||
|
||||
// Copy binary
|
||||
if _, err := io.Copy(dst, src); err != nil {
|
||||
return fmt.Errorf("failed to copy binary: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("done (%s)\n", defaultBinaryPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func installConfig(step, total int) (bool, error) {
|
||||
fmt.Printf("[%d/%d] Setting up configuration... ", step, total)
|
||||
|
||||
// Create config directory
|
||||
if err := os.MkdirAll(defaultConfigDir, 0755); err != nil {
|
||||
return false, fmt.Errorf("failed to create config directory: %w", err)
|
||||
}
|
||||
|
||||
// Check if config file already exists
|
||||
if _, err := os.Stat(defaultConfigFile); err == nil {
|
||||
fmt.Println("config already exists")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Generate config content
|
||||
configContent := generateDefaultConfig()
|
||||
|
||||
// Write config file
|
||||
if err := os.WriteFile(defaultConfigFile, []byte(configContent), 0644); err != nil {
|
||||
return false, fmt.Errorf("failed to write config file: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("done (%s)\n", defaultConfigFile)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func installServiceScript(step, total int, method string) error {
|
||||
switch method {
|
||||
case "systemd":
|
||||
return installSystemdService(step, total)
|
||||
case "service":
|
||||
return installInitdScript(step, total)
|
||||
default:
|
||||
return fmt.Errorf("unsupported method: %s", method)
|
||||
}
|
||||
}
|
||||
|
||||
func installSystemdService(step, total int) error {
|
||||
fmt.Printf("[%d/%d] Installing systemd service... ", step, total)
|
||||
|
||||
serviceContent := generateSystemdService()
|
||||
|
||||
if err := os.WriteFile(systemdServicePath, []byte(serviceContent), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write service file: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("done (%s)\n", systemdServicePath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func installInitdScript(step, total int) error {
|
||||
fmt.Printf("[%d/%d] Installing init.d script... ", step, total)
|
||||
|
||||
scriptContent := generateInitdScript()
|
||||
|
||||
if err := os.WriteFile(initdScriptPath, []byte(scriptContent), 0755); err != nil {
|
||||
return fmt.Errorf("failed to write init.d script: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("done (%s)\n", initdScriptPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateDefaultConfig() string {
|
||||
// Auto-detect network interface
|
||||
iface := detectNetworkInterface()
|
||||
hostname, _ := os.Hostname()
|
||||
if hostname == "" {
|
||||
hostname = "node1"
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`# Go-Alived Configuration
|
||||
# Generated by: go-alived install
|
||||
# Documentation: https://github.com/loveuer/go-alived
|
||||
|
||||
global:
|
||||
router_id: "%s"
|
||||
|
||||
vrrp_instances:
|
||||
- name: "VI_1"
|
||||
interface: "%s"
|
||||
state: "BACKUP"
|
||||
virtual_router_id: 51
|
||||
priority: 100
|
||||
advert_interval: 1
|
||||
auth_type: "PASS"
|
||||
auth_pass: "changeme" # TODO: Change this password
|
||||
virtual_ips:
|
||||
- "192.168.1.100/24" # TODO: Change to your VIP
|
||||
|
||||
# Optional: Health checkers
|
||||
# health_checkers:
|
||||
# - name: "check_nginx"
|
||||
# type: "tcp"
|
||||
# interval: 3s
|
||||
# timeout: 2s
|
||||
# rise: 3
|
||||
# fall: 2
|
||||
# config:
|
||||
# host: "127.0.0.1"
|
||||
# port: 80
|
||||
`, hostname, iface)
|
||||
}
|
||||
|
||||
// generateSystemdService returns the contents of the systemd unit file that
// runs go-alived as a root service. The unit grants the network capabilities
// the service needs (CAP_NET_ADMIN/CAP_NET_RAW for raw VRRP sockets and
// interface management) while otherwise locking the filesystem down via
// ProtectSystem=strict with a writable exception for /etc/go-alived.
func generateSystemdService() string {
	return `[Unit]
Description=Go-Alived - VRRP High Availability Service
Documentation=https://github.com/loveuer/go-alived
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User=root
Group=root

ExecStart=/usr/local/bin/go-alived run --config /etc/go-alived/config.yaml
ExecReload=/bin/kill -HUP $MAINPID

Restart=on-failure
RestartSec=5s

StandardOutput=journal
StandardError=journal
SyslogIdentifier=go-alived

# Security settings
NoNewPrivileges=false
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/etc/go-alived

# Resource limits
LimitNOFILE=65535
LimitNPROC=512

# Capabilities required for VRRP operations
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_RAW CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW CAP_NET_BIND_SERVICE

[Install]
WantedBy=multi-user.target
`
}
|
||||
|
||||
// generateInitdScript returns the contents of the SysV init script for
// go-alived (start/stop/restart/reload/status with a PID file).
//
// Fix: the script declares `#!/bin/sh` but used `echo -n`, which is not
// portable — POSIX sh implementations such as dash print "-n" literally.
// Progress messages now use printf instead.
func generateInitdScript() string {
	return `#!/bin/sh
### BEGIN INIT INFO
# Provides:          go-alived
# Required-Start:    $network $remote_fs $syslog
# Required-Stop:     $network $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Go-Alived VRRP High Availability Service
# Description:       Lightweight VRRP implementation for IP high availability
### END INIT INFO

NAME="go-alived"
DAEMON="/usr/local/bin/go-alived"
DAEMON_ARGS="run --config /etc/go-alived/config.yaml"
PIDFILE="/var/run/${NAME}.pid"
LOGFILE="/var/log/${NAME}.log"

[ -x "$DAEMON" ] || exit 5

start() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "$NAME is already running"
        return 1
    fi
    # printf instead of 'echo -n': POSIX sh (e.g. dash) prints "-n" literally.
    printf "Starting %s... " "$NAME"
    nohup $DAEMON $DAEMON_ARGS >> "$LOGFILE" 2>&1 &
    echo $! > "$PIDFILE"
    echo "done (PID: $(cat "$PIDFILE"))"
}

stop() {
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "$NAME is not running"
        return 1
    fi
    printf "Stopping %s... " "$NAME"
    kill "$(cat "$PIDFILE")"
    rm -f "$PIDFILE"
    echo "done"
}

restart() {
    stop
    sleep 1
    start
}

reload() {
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "$NAME is not running"
        return 1
    fi
    printf "Reloading %s configuration... " "$NAME"
    kill -HUP "$(cat "$PIDFILE")"
    echo "done"
}

status() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "$NAME is running (PID: $(cat "$PIDFILE"))"
    else
        echo "$NAME is not running"
        [ -f "$PIDFILE" ] && rm -f "$PIDFILE"
        return 1
    fi
}

case "$1" in
    start)   start ;;
    stop)    stop ;;
    restart) restart ;;
    reload)  reload ;;
    status)  status ;;
    *)
        echo "Usage: $0 {start|stop|restart|reload|status}"
        exit 2
        ;;
esac

exit $?
`
}
|
||||
|
||||
// detectNetworkInterface returns the name of the first interface that
// is up, is not a loopback device, and carries a non-loopback IPv4
// address. If interface enumeration fails or nothing qualifies, it
// falls back to "eth0".
func detectNetworkInterface() string {
	const fallback = "eth0"

	ifaces, err := net.Interfaces()
	if err != nil {
		return fallback
	}

	for _, candidate := range ifaces {
		// Only consider interfaces that are up and not loopback.
		if candidate.Flags&net.FlagLoopback != 0 || candidate.Flags&net.FlagUp == 0 {
			continue
		}

		addrs, err := candidate.Addrs()
		if err != nil {
			continue
		}

		// Accept the interface as soon as one usable IPv4 address is found.
		for _, a := range addrs {
			ipNet, ok := a.(*net.IPNet)
			if !ok {
				continue
			}
			if v4 := ipNet.IP.To4(); v4 != nil && !v4.IsLoopback() {
				return candidate.Name
			}
		}
	}

	return fallback
}
|
||||
|
||||
// printCompletionMessage prints a post-installation summary to stdout:
// which files were installed, what the user must edit before starting,
// and the commands to start/inspect the service for the chosen init
// system ("systemd" or SysV init.d).
//
// method selects the instruction set; configCreated indicates whether a
// fresh default config was written (and therefore must be customized).
func printCompletionMessage(method string, configCreated bool) {
	fmt.Println()
	fmt.Println("=== Installation Complete ===")
	fmt.Println()

	// Installed files summary
	fmt.Println(">>> Installed Files:")
	fmt.Printf(" Binary: %s\n", defaultBinaryPath)
	fmt.Printf(" Config: %s\n", defaultConfigFile)
	if method == "systemd" {
		fmt.Printf(" Service: %s\n", systemdServicePath)
	} else {
		fmt.Printf(" Service: %s\n", initdScriptPath)
	}
	fmt.Println()

	// What needs to be modified
	fmt.Println(">>> Configuration Required:")
	fmt.Printf(" Edit: %s\n", defaultConfigFile)
	fmt.Println()
	if configCreated {
		// A brand-new config ships with placeholder values that must be
		// replaced before the service is usable.
		fmt.Println(" Modify the following settings:")
		fmt.Println(" - auth_pass: Change 'changeme' to a secure password")
		fmt.Println(" - virtual_ips: Set your Virtual IP address(es)")
		fmt.Println(" - interface: Verify the network interface is correct")
		fmt.Println(" - priority: Adjust based on node role (higher = more likely master)")
	} else {
		fmt.Println(" Review your existing configuration")
	}
	fmt.Println()

	// How to start
	fmt.Println(">>> Next Steps:")
	if method == "systemd" {
		fmt.Println(" 1. Edit configuration:")
		fmt.Printf(" sudo vim %s\n", defaultConfigFile)
		fmt.Println()
		fmt.Println(" 2. Reload systemd and start service:")
		fmt.Println(" sudo systemctl daemon-reload")
		fmt.Println(" sudo systemctl enable go-alived")
		fmt.Println(" sudo systemctl start go-alived")
		fmt.Println()
		fmt.Println(" 3. Check service status:")
		fmt.Println(" sudo systemctl status go-alived")
		fmt.Println(" sudo journalctl -u go-alived -f")
	} else {
		fmt.Println(" 1. Edit configuration:")
		fmt.Printf(" sudo vim %s\n", defaultConfigFile)
		fmt.Println()
		fmt.Println(" 2. Start service:")
		fmt.Printf(" sudo %s start\n", initdScriptPath)
		fmt.Println()
		fmt.Println(" 3. Enable on boot (Debian/Ubuntu):")
		fmt.Println(" sudo update-rc.d go-alived defaults")
		fmt.Println()
		fmt.Println(" 4. Check service status:")
		fmt.Printf(" sudo %s status\n", initdScriptPath)
		fmt.Printf(" tail -f /var/log/go-alived.log\n")
	}
	fmt.Println()

	// Test environment
	fmt.Println(">>> Test Environment (Optional):")
	fmt.Printf(" sudo %s test\n", defaultBinaryPath)
	fmt.Println()
}
|
||||
@@ -6,12 +6,14 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Version can be set at build time via ldflags
|
||||
var Version = "1.2.1"
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "go-alived",
|
||||
Short: "Go-Alived - VRRP High Availability Service",
|
||||
Long: `go-alived is a lightweight, dependency-free VRRP implementation in Go.
|
||||
It provides high availability for IP addresses with health checking support.`,
|
||||
Version: "1.0.0",
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
@@ -21,5 +23,6 @@ func Execute() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.Version = Version
|
||||
rootCmd.CompletionOptions.DisableDefaultCmd = true
|
||||
}
|
||||
@@ -367,7 +367,7 @@ func (t *EnvironmentTest) TestCloudEnvironment() {
|
||||
if err == nil {
|
||||
cloudDetected = true
|
||||
t.AddResult("云环境", !test.isFatal, fmt.Sprintf("检测到%s环境", test.name), test.isFatal)
|
||||
t.log.Warn(test.solution)
|
||||
t.log.Warn("%s", test.solution)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,9 +8,13 @@ import (
|
||||
)
|
||||
|
||||
func CreateChecker(cfg *config.HealthChecker) (Checker, error) {
|
||||
if cfg.Config == nil {
|
||||
return nil, fmt.Errorf("missing config for checker %s", cfg.Name)
|
||||
}
|
||||
|
||||
configMap, ok := cfg.Config.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid config for checker %s", cfg.Name)
|
||||
return nil, fmt.Errorf("invalid config type for checker %s: expected map[string]interface{}", cfg.Name)
|
||||
}
|
||||
|
||||
switch cfg.Type {
|
||||
@@ -36,6 +40,7 @@ func LoadFromConfig(cfg *config.Config, log *logger.Logger) (*Manager, error) {
|
||||
return nil, fmt.Errorf("failed to create checker %s: %w", healthCfg.Name, err)
|
||||
}
|
||||
|
||||
configMap, _ := healthCfg.Config.(map[string]interface{})
|
||||
monitorCfg := &CheckerConfig{
|
||||
Name: healthCfg.Name,
|
||||
Type: healthCfg.Type,
|
||||
@@ -43,7 +48,7 @@ func LoadFromConfig(cfg *config.Config, log *logger.Logger) (*Manager, error) {
|
||||
Timeout: healthCfg.Timeout,
|
||||
Rise: healthCfg.Rise,
|
||||
Fall: healthCfg.Fall,
|
||||
Config: healthCfg.Config.(map[string]interface{}),
|
||||
Config: configMap,
|
||||
}
|
||||
|
||||
monitor := NewMonitor(checker, monitorCfg, log)
|
||||
|
||||
74
internal/health/manager.go
Normal file
74
internal/health/manager.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package health
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/loveuer/go-alived/pkg/logger"
|
||||
)
|
||||
|
||||
// Manager manages multiple health check monitors.
|
||||
type Manager struct {
|
||||
monitors map[string]*Monitor
|
||||
mu sync.RWMutex
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
// NewManager creates a new health check Manager.
|
||||
func NewManager(log *logger.Logger) *Manager {
|
||||
return &Manager{
|
||||
monitors: make(map[string]*Monitor),
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
// AddMonitor adds a monitor to the manager.
|
||||
func (m *Manager) AddMonitor(monitor *Monitor) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.monitors[monitor.config.Name] = monitor
|
||||
}
|
||||
|
||||
// GetMonitor retrieves a monitor by name.
|
||||
func (m *Manager) GetMonitor(name string) (*Monitor, bool) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
monitor, ok := m.monitors[name]
|
||||
return monitor, ok
|
||||
}
|
||||
|
||||
// StartAll starts all registered monitors.
|
||||
func (m *Manager) StartAll() {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, monitor := range m.monitors {
|
||||
monitor.Start()
|
||||
}
|
||||
|
||||
m.log.Info("started %d health check monitor(s)", len(m.monitors))
|
||||
}
|
||||
|
||||
// StopAll stops all registered monitors.
|
||||
func (m *Manager) StopAll() {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, monitor := range m.monitors {
|
||||
monitor.Stop()
|
||||
}
|
||||
|
||||
m.log.Info("stopped all health check monitors")
|
||||
}
|
||||
|
||||
// GetAllStates returns the current state of all monitors.
|
||||
func (m *Manager) GetAllStates() map[string]*CheckerState {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
states := make(map[string]*CheckerState)
|
||||
for name, monitor := range m.monitors {
|
||||
states[name] = monitor.GetState()
|
||||
}
|
||||
|
||||
return states
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/loveuer/go-alived/pkg/logger"
|
||||
)
|
||||
|
||||
// Monitor runs periodic health checks and tracks state.
|
||||
type Monitor struct {
|
||||
checker Checker
|
||||
config *CheckerConfig
|
||||
@@ -21,6 +22,7 @@ type Monitor struct {
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewMonitor creates a new Monitor for the given checker.
|
||||
func NewMonitor(checker Checker, config *CheckerConfig, log *logger.Logger) *Monitor {
|
||||
return &Monitor{
|
||||
checker: checker,
|
||||
@@ -35,6 +37,7 @@ func NewMonitor(checker Checker, config *CheckerConfig, log *logger.Logger) *Mon
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the health check loop.
|
||||
func (m *Monitor) Start() {
|
||||
m.mu.Lock()
|
||||
if m.running {
|
||||
@@ -51,6 +54,7 @@ func (m *Monitor) Start() {
|
||||
go m.checkLoop()
|
||||
}
|
||||
|
||||
// Stop stops the health check loop.
|
||||
func (m *Monitor) Stop() {
|
||||
m.mu.Lock()
|
||||
if !m.running {
|
||||
@@ -71,6 +75,7 @@ func (m *Monitor) checkLoop() {
|
||||
ticker := time.NewTicker(m.config.Interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Perform initial check immediately
|
||||
m.performCheck()
|
||||
|
||||
for {
|
||||
@@ -95,7 +100,8 @@ func (m *Monitor) performCheck() {
|
||||
oldHealthy := m.state.Healthy
|
||||
stateChanged := m.state.Update(result, m.config.Rise, m.config.Fall)
|
||||
newHealthy := m.state.Healthy
|
||||
callbacks := m.callbacks
|
||||
callbacks := make([]StateChangeCallback, len(m.callbacks))
|
||||
copy(callbacks, m.callbacks)
|
||||
m.mu.Unlock()
|
||||
|
||||
m.log.Debug("[HealthCheck:%s] check completed: result=%s, duration=%s, healthy=%v",
|
||||
@@ -111,12 +117,14 @@ func (m *Monitor) performCheck() {
|
||||
}
|
||||
}
|
||||
|
||||
// OnStateChange registers a callback for health state changes.
|
||||
func (m *Monitor) OnStateChange(callback StateChangeCallback) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.callbacks = append(m.callbacks, callback)
|
||||
}
|
||||
|
||||
// GetState returns a copy of the current checker state.
|
||||
func (m *Monitor) GetState() *CheckerState {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
@@ -125,68 +133,9 @@ func (m *Monitor) GetState() *CheckerState {
|
||||
return &stateCopy
|
||||
}
|
||||
|
||||
// IsHealthy returns whether the checker is currently healthy.
|
||||
func (m *Monitor) IsHealthy() bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.state.Healthy
|
||||
}
|
||||
|
||||
type Manager struct {
|
||||
monitors map[string]*Monitor
|
||||
mu sync.RWMutex
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func NewManager(log *logger.Logger) *Manager {
|
||||
return &Manager{
|
||||
monitors: make(map[string]*Monitor),
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) AddMonitor(monitor *Monitor) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.monitors[monitor.config.Name] = monitor
|
||||
}
|
||||
|
||||
func (m *Manager) GetMonitor(name string) (*Monitor, bool) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
monitor, ok := m.monitors[name]
|
||||
return monitor, ok
|
||||
}
|
||||
|
||||
func (m *Manager) StartAll() {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, monitor := range m.monitors {
|
||||
monitor.Start()
|
||||
}
|
||||
|
||||
m.log.Info("started %d health check monitor(s)", len(m.monitors))
|
||||
}
|
||||
|
||||
func (m *Manager) StopAll() {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
for _, monitor := range m.monitors {
|
||||
monitor.Stop()
|
||||
}
|
||||
|
||||
m.log.Info("stopped all health check monitors")
|
||||
}
|
||||
|
||||
func (m *Manager) GetAllStates() map[string]*CheckerState {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
states := make(map[string]*CheckerState)
|
||||
for name, monitor := range m.monitors {
|
||||
states[name] = monitor.GetState()
|
||||
}
|
||||
|
||||
return states
|
||||
}
|
||||
|
||||
95
internal/vrrp/history.go
Normal file
95
internal/vrrp/history.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package vrrp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StateTransition represents a single state transition event.
|
||||
type StateTransition struct {
|
||||
From State
|
||||
To State
|
||||
Timestamp time.Time
|
||||
Reason string
|
||||
}
|
||||
|
||||
// StateHistory maintains a bounded history of state transitions.
|
||||
type StateHistory struct {
|
||||
transitions []StateTransition
|
||||
maxSize int
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewStateHistory creates a new StateHistory with the specified maximum size.
|
||||
func NewStateHistory(maxSize int) *StateHistory {
|
||||
return &StateHistory{
|
||||
transitions: make([]StateTransition, 0, maxSize),
|
||||
maxSize: maxSize,
|
||||
}
|
||||
}
|
||||
|
||||
// Add records a new state transition.
|
||||
func (sh *StateHistory) Add(from, to State, reason string) {
|
||||
sh.mu.Lock()
|
||||
defer sh.mu.Unlock()
|
||||
|
||||
transition := StateTransition{
|
||||
From: from,
|
||||
To: to,
|
||||
Timestamp: time.Now(),
|
||||
Reason: reason,
|
||||
}
|
||||
|
||||
sh.transitions = append(sh.transitions, transition)
|
||||
|
||||
// Maintain bounded size using ring buffer style
|
||||
if len(sh.transitions) > sh.maxSize {
|
||||
// Copy to new slice to allow garbage collection of old backing array
|
||||
newTransitions := make([]StateTransition, len(sh.transitions)-1, sh.maxSize)
|
||||
copy(newTransitions, sh.transitions[1:])
|
||||
sh.transitions = newTransitions
|
||||
}
|
||||
}
|
||||
|
||||
// GetRecent returns the most recent n transitions.
|
||||
func (sh *StateHistory) GetRecent(n int) []StateTransition {
|
||||
sh.mu.RLock()
|
||||
defer sh.mu.RUnlock()
|
||||
|
||||
if n > len(sh.transitions) {
|
||||
n = len(sh.transitions)
|
||||
}
|
||||
|
||||
start := len(sh.transitions) - n
|
||||
result := make([]StateTransition, n)
|
||||
copy(result, sh.transitions[start:])
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Len returns the number of recorded transitions.
|
||||
func (sh *StateHistory) Len() int {
|
||||
sh.mu.RLock()
|
||||
defer sh.mu.RUnlock()
|
||||
return len(sh.transitions)
|
||||
}
|
||||
|
||||
// String returns a formatted string representation of the history.
|
||||
func (sh *StateHistory) String() string {
|
||||
sh.mu.RLock()
|
||||
defer sh.mu.RUnlock()
|
||||
|
||||
if len(sh.transitions) == 0 {
|
||||
return "No state transitions"
|
||||
}
|
||||
|
||||
result := "State transition history:\n"
|
||||
for _, t := range sh.transitions {
|
||||
result += fmt.Sprintf(" %s: %s -> %s (%s)\n",
|
||||
t.Timestamp.Format("2006-01-02 15:04:05"),
|
||||
t.From, t.To, t.Reason)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -17,6 +17,7 @@ type Instance struct {
|
||||
AdvertInterval uint8
|
||||
Interface string
|
||||
VirtualIPs []net.IP
|
||||
VirtualIPCIDRs []string // preserve original CIDR notation
|
||||
AuthType uint8
|
||||
AuthPass string
|
||||
TrackScripts []string
|
||||
@@ -64,12 +65,14 @@ func NewInstance(
|
||||
}
|
||||
|
||||
virtualIPs := make([]net.IP, 0, len(vips))
|
||||
virtualIPCIDRs := make([]string, 0, len(vips))
|
||||
for _, vip := range vips {
|
||||
ip, _, err := net.ParseCIDR(vip)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid VIP %s: %w", vip, err)
|
||||
}
|
||||
virtualIPs = append(virtualIPs, ip)
|
||||
virtualIPCIDRs = append(virtualIPCIDRs, vip)
|
||||
}
|
||||
|
||||
var authTypeNum uint8
|
||||
@@ -94,6 +97,7 @@ func NewInstance(
|
||||
AdvertInterval: advertInt,
|
||||
Interface: iface,
|
||||
VirtualIPs: virtualIPs,
|
||||
VirtualIPCIDRs: virtualIPCIDRs,
|
||||
AuthType: authTypeNum,
|
||||
AuthPass: authPass,
|
||||
TrackScripts: trackScripts,
|
||||
@@ -192,8 +196,15 @@ func (inst *Instance) receiveLoop() {
|
||||
default:
|
||||
}
|
||||
|
||||
// Set read deadline to allow periodic check of stop channel
|
||||
inst.socket.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
|
||||
|
||||
pkt, srcIP, err := inst.socket.Receive()
|
||||
if err != nil {
|
||||
// Check if it's a timeout error, which is expected
|
||||
if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
|
||||
continue
|
||||
}
|
||||
inst.log.Debug("[%s] failed to receive packet: %v", inst.Name, err)
|
||||
continue
|
||||
}
|
||||
@@ -371,11 +382,7 @@ func (inst *Instance) removeVIPs() error {
|
||||
}
|
||||
|
||||
func (inst *Instance) getVIPsWithCIDR() []string {
|
||||
result := make([]string, len(inst.VirtualIPs))
|
||||
for i, ip := range inst.VirtualIPs {
|
||||
result[i] = ip.String() + "/32"
|
||||
}
|
||||
return result
|
||||
return inst.VirtualIPCIDRs
|
||||
}
|
||||
|
||||
func (inst *Instance) GetState() State {
|
||||
@@ -399,15 +406,17 @@ func (inst *Instance) AdjustPriority(delta int) {
|
||||
defer inst.mu.Unlock()
|
||||
|
||||
oldPriority := inst.priorityCalc.GetPriority()
|
||||
|
||||
|
||||
if delta < 0 {
|
||||
inst.priorityCalc.DecreasePriority(uint8(-delta))
|
||||
} else if delta > 0 {
|
||||
inst.priorityCalc.IncreasePriority(uint8(delta))
|
||||
}
|
||||
|
||||
|
||||
newPriority := inst.priorityCalc.GetPriority()
|
||||
|
||||
|
||||
if oldPriority != newPriority {
|
||||
inst.log.Info("[%s] priority adjusted: %d -> %d (delta=%d)",
|
||||
inst.log.Info("[%s] priority adjusted: %d -> %d (delta=%d)",
|
||||
inst.Name, oldPriority, newPriority, delta)
|
||||
}
|
||||
}
|
||||
|
||||
99
internal/vrrp/priority.go
Normal file
99
internal/vrrp/priority.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package vrrp
|
||||
|
||||
import (
	"bytes"
	"net"
	"sync"
	"time"
)
|
||||
|
||||
// PriorityCalculator manages VRRP priority with support for dynamic adjustment.
|
||||
type PriorityCalculator struct {
|
||||
basePriority uint8
|
||||
currentPriority uint8
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewPriorityCalculator creates a new PriorityCalculator with the specified base priority.
|
||||
func NewPriorityCalculator(basePriority uint8) *PriorityCalculator {
|
||||
return &PriorityCalculator{
|
||||
basePriority: basePriority,
|
||||
currentPriority: basePriority,
|
||||
}
|
||||
}
|
||||
|
||||
// GetPriority returns the current priority.
|
||||
func (pc *PriorityCalculator) GetPriority() uint8 {
|
||||
pc.mu.RLock()
|
||||
defer pc.mu.RUnlock()
|
||||
return pc.currentPriority
|
||||
}
|
||||
|
||||
// DecreasePriority decreases the current priority by the specified amount.
|
||||
// The priority will not go below 0.
|
||||
func (pc *PriorityCalculator) DecreasePriority(amount uint8) {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
|
||||
if pc.currentPriority > amount {
|
||||
pc.currentPriority -= amount
|
||||
} else {
|
||||
pc.currentPriority = 0
|
||||
}
|
||||
}
|
||||
|
||||
// IncreasePriority increases the current priority by the specified amount.
|
||||
// The priority will not exceed 255 or the base priority.
|
||||
func (pc *PriorityCalculator) IncreasePriority(amount uint8) {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
|
||||
newPriority := pc.currentPriority + amount
|
||||
if newPriority > pc.basePriority {
|
||||
newPriority = pc.basePriority
|
||||
}
|
||||
if newPriority < pc.currentPriority { // overflow check
|
||||
newPriority = pc.basePriority
|
||||
}
|
||||
pc.currentPriority = newPriority
|
||||
}
|
||||
|
||||
// ResetPriority resets the priority to the base value.
|
||||
func (pc *PriorityCalculator) ResetPriority() {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
pc.currentPriority = pc.basePriority
|
||||
}
|
||||
|
||||
// SetBasePriority sets a new base priority and resets current priority to match.
|
||||
func (pc *PriorityCalculator) SetBasePriority(priority uint8) {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
pc.basePriority = priority
|
||||
pc.currentPriority = priority
|
||||
}
|
||||
|
||||
// ShouldBecomeMaster determines if the local node should become master
|
||||
// based on priority comparison and IP address tie-breaking.
|
||||
func ShouldBecomeMaster(localPriority, remotePriority uint8, localIP, remoteIP string) bool {
|
||||
if localPriority > remotePriority {
|
||||
return true
|
||||
}
|
||||
|
||||
if localPriority == remotePriority {
|
||||
return localIP > remoteIP
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// CalculateMasterDownInterval calculates the master down interval
|
||||
// according to VRRP specification: (3 * Advertisement_Interval).
|
||||
func CalculateMasterDownInterval(advertInt uint8) time.Duration {
|
||||
return time.Duration(3*int(advertInt)) * time.Second
|
||||
}
|
||||
|
||||
// CalculateSkewTime calculates the skew time for master down timer
|
||||
// according to VRRP specification: ((256 - Priority) / 256).
|
||||
func CalculateSkewTime(priority uint8) time.Duration {
|
||||
skew := float64(256-int(priority)) / 256.0
|
||||
return time.Duration(skew * float64(time.Second))
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/ipv4"
|
||||
)
|
||||
@@ -14,10 +15,11 @@ const (
|
||||
)
|
||||
|
||||
type Socket struct {
|
||||
conn *ipv4.RawConn
|
||||
iface *net.Interface
|
||||
localIP net.IP
|
||||
groupIP net.IP
|
||||
conn *ipv4.RawConn
|
||||
packetConn net.PacketConn
|
||||
iface *net.Interface
|
||||
localIP net.IP
|
||||
groupIP net.IP
|
||||
}
|
||||
|
||||
func NewSocket(ifaceName string) (*Socket, error) {
|
||||
@@ -56,9 +58,8 @@ func NewSocket(ifaceName string) (*Socket, error) {
|
||||
}
|
||||
|
||||
file := os.NewFile(uintptr(fd), "vrrp-socket")
|
||||
defer file.Close()
|
||||
|
||||
packetConn, err := net.FilePacketConn(file)
|
||||
file.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create packet connection: %w", err)
|
||||
}
|
||||
@@ -86,10 +87,11 @@ func NewSocket(ifaceName string) (*Socket, error) {
|
||||
}
|
||||
|
||||
return &Socket{
|
||||
conn: rawConn,
|
||||
iface: iface,
|
||||
localIP: localIP,
|
||||
groupIP: groupIP,
|
||||
conn: rawConn,
|
||||
packetConn: packetConn,
|
||||
iface: iface,
|
||||
localIP: localIP,
|
||||
groupIP: groupIP,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -133,6 +135,10 @@ func (s *Socket) Receive() (*VRRPPacket, net.IP, error) {
|
||||
return pkt, header.Src, nil
|
||||
}
|
||||
|
||||
func (s *Socket) SetReadDeadline(t time.Time) error {
|
||||
return s.packetConn.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
func (s *Socket) Close() error {
|
||||
if err := s.conn.LeaveGroup(s.iface, &net.IPAddr{IP: s.groupIP}); err != nil {
|
||||
return err
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
package vrrp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
import "sync"
|
||||
|
||||
// State represents the VRRP instance state.
|
||||
type State int
|
||||
|
||||
const (
|
||||
@@ -15,6 +12,7 @@ const (
|
||||
StateFault
|
||||
)
|
||||
|
||||
// String returns the string representation of the state.
|
||||
func (s State) String() string {
|
||||
switch s {
|
||||
case StateInit:
|
||||
@@ -30,33 +28,39 @@ func (s State) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// StateMachine manages VRRP state transitions with thread-safe callbacks.
|
||||
type StateMachine struct {
|
||||
currentState State
|
||||
previousState State
|
||||
mu sync.RWMutex
|
||||
currentState State
|
||||
mu sync.RWMutex
|
||||
stateChangeCallbacks []func(old, new State)
|
||||
}
|
||||
|
||||
// NewStateMachine creates a new StateMachine with the specified initial state.
|
||||
func NewStateMachine(initialState State) *StateMachine {
|
||||
return &StateMachine{
|
||||
currentState: initialState,
|
||||
previousState: StateInit,
|
||||
currentState: initialState,
|
||||
stateChangeCallbacks: make([]func(old, new State), 0),
|
||||
}
|
||||
}
|
||||
|
||||
// GetState returns the current state.
|
||||
func (sm *StateMachine) GetState() State {
|
||||
sm.mu.RLock()
|
||||
defer sm.mu.RUnlock()
|
||||
return sm.currentState
|
||||
}
|
||||
|
||||
// SetState transitions to a new state and triggers registered callbacks.
|
||||
func (sm *StateMachine) SetState(newState State) {
|
||||
sm.mu.Lock()
|
||||
oldState := sm.currentState
|
||||
sm.previousState = oldState
|
||||
if oldState == newState {
|
||||
sm.mu.Unlock()
|
||||
return
|
||||
}
|
||||
sm.currentState = newState
|
||||
callbacks := sm.stateChangeCallbacks
|
||||
callbacks := make([]func(old, new State), len(sm.stateChangeCallbacks))
|
||||
copy(callbacks, sm.stateChangeCallbacks)
|
||||
sm.mu.Unlock()
|
||||
|
||||
for _, callback := range callbacks {
|
||||
@@ -64,195 +68,9 @@ func (sm *StateMachine) SetState(newState State) {
|
||||
}
|
||||
}
|
||||
|
||||
// OnStateChange registers a callback to be invoked on state changes.
|
||||
func (sm *StateMachine) OnStateChange(callback func(old, new State)) {
|
||||
sm.mu.Lock()
|
||||
defer sm.mu.Unlock()
|
||||
sm.stateChangeCallbacks = append(sm.stateChangeCallbacks, callback)
|
||||
}
|
||||
|
||||
type Timer struct {
|
||||
duration time.Duration
|
||||
timer *time.Timer
|
||||
callback func()
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func NewTimer(duration time.Duration, callback func()) *Timer {
|
||||
return &Timer{
|
||||
duration: duration,
|
||||
callback: callback,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Timer) Start() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
}
|
||||
|
||||
t.timer = time.AfterFunc(t.duration, t.callback)
|
||||
}
|
||||
|
||||
func (t *Timer) Stop() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
t.timer = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Timer) Reset() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
}
|
||||
|
||||
t.timer = time.AfterFunc(t.duration, t.callback)
|
||||
}
|
||||
|
||||
func (t *Timer) SetDuration(duration time.Duration) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.duration = duration
|
||||
}
|
||||
|
||||
type PriorityCalculator struct {
|
||||
basePriority uint8
|
||||
currentPriority uint8
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewPriorityCalculator(basePriority uint8) *PriorityCalculator {
|
||||
return &PriorityCalculator{
|
||||
basePriority: basePriority,
|
||||
currentPriority: basePriority,
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *PriorityCalculator) GetPriority() uint8 {
|
||||
pc.mu.RLock()
|
||||
defer pc.mu.RUnlock()
|
||||
return pc.currentPriority
|
||||
}
|
||||
|
||||
func (pc *PriorityCalculator) DecreasePriority(amount uint8) {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
|
||||
if pc.currentPriority > amount {
|
||||
pc.currentPriority -= amount
|
||||
} else {
|
||||
pc.currentPriority = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *PriorityCalculator) ResetPriority() {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
pc.currentPriority = pc.basePriority
|
||||
}
|
||||
|
||||
func (pc *PriorityCalculator) SetBasePriority(priority uint8) {
|
||||
pc.mu.Lock()
|
||||
defer pc.mu.Unlock()
|
||||
pc.basePriority = priority
|
||||
pc.currentPriority = priority
|
||||
}
|
||||
|
||||
func ShouldBecomeMaster(localPriority, remotePriority uint8, localIP, remoteIP string) bool {
|
||||
if localPriority > remotePriority {
|
||||
return true
|
||||
}
|
||||
|
||||
if localPriority == remotePriority {
|
||||
return localIP > remoteIP
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func CalculateMasterDownInterval(advertInt uint8) time.Duration {
|
||||
return time.Duration(3*int(advertInt)) * time.Second
|
||||
}
|
||||
|
||||
func CalculateSkewTime(priority uint8) time.Duration {
|
||||
skew := float64(256-int(priority)) / 256.0
|
||||
return time.Duration(skew * float64(time.Second))
|
||||
}
|
||||
|
||||
type StateTransition struct {
|
||||
From State
|
||||
To State
|
||||
Timestamp time.Time
|
||||
Reason string
|
||||
}
|
||||
|
||||
type StateHistory struct {
|
||||
transitions []StateTransition
|
||||
maxSize int
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewStateHistory(maxSize int) *StateHistory {
|
||||
return &StateHistory{
|
||||
transitions: make([]StateTransition, 0, maxSize),
|
||||
maxSize: maxSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (sh *StateHistory) Add(from, to State, reason string) {
|
||||
sh.mu.Lock()
|
||||
defer sh.mu.Unlock()
|
||||
|
||||
transition := StateTransition{
|
||||
From: from,
|
||||
To: to,
|
||||
Timestamp: time.Now(),
|
||||
Reason: reason,
|
||||
}
|
||||
|
||||
sh.transitions = append(sh.transitions, transition)
|
||||
|
||||
if len(sh.transitions) > sh.maxSize {
|
||||
sh.transitions = sh.transitions[1:]
|
||||
}
|
||||
}
|
||||
|
||||
func (sh *StateHistory) GetRecent(n int) []StateTransition {
|
||||
sh.mu.RLock()
|
||||
defer sh.mu.RUnlock()
|
||||
|
||||
if n > len(sh.transitions) {
|
||||
n = len(sh.transitions)
|
||||
}
|
||||
|
||||
start := len(sh.transitions) - n
|
||||
result := make([]StateTransition, n)
|
||||
copy(result, sh.transitions[start:])
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (sh *StateHistory) String() string {
|
||||
sh.mu.RLock()
|
||||
defer sh.mu.RUnlock()
|
||||
|
||||
if len(sh.transitions) == 0 {
|
||||
return "No state transitions"
|
||||
}
|
||||
|
||||
result := "State transition history:\n"
|
||||
for _, t := range sh.transitions {
|
||||
result += fmt.Sprintf(" %s: %s -> %s (%s)\n",
|
||||
t.Timestamp.Format("2006-01-02 15:04:05"),
|
||||
t.From, t.To, t.Reason)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
64
internal/vrrp/timer.go
Normal file
64
internal/vrrp/timer.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package vrrp
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Timer provides a thread-safe timer with callback support.
|
||||
type Timer struct {
|
||||
duration time.Duration
|
||||
timer *time.Timer
|
||||
callback func()
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// NewTimer creates a new Timer with the specified duration and callback.
|
||||
func NewTimer(duration time.Duration, callback func()) *Timer {
|
||||
return &Timer{
|
||||
duration: duration,
|
||||
callback: callback,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts or restarts the timer.
|
||||
func (t *Timer) Start() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
}
|
||||
|
||||
t.timer = time.AfterFunc(t.duration, t.callback)
|
||||
}
|
||||
|
||||
// Stop stops the timer if it's running.
|
||||
func (t *Timer) Stop() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
t.timer = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Reset stops the current timer and starts a new one with the same duration.
|
||||
func (t *Timer) Reset() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.timer != nil {
|
||||
t.timer.Stop()
|
||||
}
|
||||
|
||||
t.timer = time.AfterFunc(t.duration, t.callback)
|
||||
}
|
||||
|
||||
// SetDuration updates the timer's duration for future starts.
|
||||
func (t *Timer) SetDuration(duration time.Duration) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.duration = duration
|
||||
}
|
||||
Reference in New Issue
Block a user