feat: 🏡 make apps

This commit is contained in:
zhaoyupeng
2025-11-26 16:17:38 +08:00
parent 1d3c159c00
commit 4ec58ce4e5
32 changed files with 1856 additions and 107 deletions

BIN
forge

Binary file not shown.

View File

@@ -52,7 +52,6 @@ func makeCmd() *cobra.Command {
_cmd.AddCommand( _cmd.AddCommand(
makecmd.Images(), makecmd.Images(),
makecmd.Binaries(), makecmd.Binaries(),
makecmd.Debs(),
makecmd.Flannel(), makecmd.Flannel(),
makecmd.Longhorn(), makecmd.Longhorn(),
makecmd.Mysql(), makecmd.Mysql(),
@@ -63,6 +62,8 @@ func makeCmd() *cobra.Command {
makecmd.LessDNS(), makecmd.LessDNS(),
makecmd.HSNet(), makecmd.HSNet(),
makecmd.ConfigMap(), makecmd.ConfigMap(),
makecmd.Proxy(),
makecmd.Seafile(),
makecmd.App(), makecmd.App(),
) )

View File

@@ -2,6 +2,7 @@ package makecmd
import ( import (
"github.com/spf13/cobra" "github.com/spf13/cobra"
"yizhisec.com/hsv2/forge/internal/controller/maker"
) )
func App() *cobra.Command { func App() *cobra.Command {
@@ -12,7 +13,108 @@ func App() *cobra.Command {
_cmd.AddCommand( _cmd.AddCommand(
appUser(), appUser(),
appClient(),
appGateway(),
appMie(),
appOEM(),
) )
return _cmd return _cmd
} }
// appUser builds the "user" subcommand, which renders the User app
// deployment artifacts through the maker controller.
//
// The --replica-count flag (default 2) selects the deployment replica count.
func appUser() *cobra.Command {
	var replica int

	userCmd := &cobra.Command{
		Use:   "user",
		Short: "Make User App",
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().AppUser(cmd.Context(), replica)
		},
	}
	userCmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")

	return userCmd
}
// appClient builds the "client" subcommand, which renders the Client app
// deployment artifacts through the maker controller.
//
// The --replica-count flag (default 2) selects the deployment replica count.
func appClient() *cobra.Command {
	var replica int

	clientCmd := &cobra.Command{
		Use:   "client",
		Short: "Make Client App",
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().AppClient(cmd.Context(), replica)
		},
	}
	clientCmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")

	return clientCmd
}
// appGateway builds the "gateway" subcommand, which renders the Gateway app
// deployment artifacts through the maker controller.
//
// The --replica-count flag (default 2) selects the deployment replica count.
func appGateway() *cobra.Command {
	var replica int

	gatewayCmd := &cobra.Command{
		Use:   "gateway",
		Short: "Make Gateway App",
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().AppGateway(cmd.Context(), replica)
		},
	}
	gatewayCmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")

	return gatewayCmd
}
// appMie builds the "mie" subcommand, which renders the Mie app
// deployment artifacts through the maker controller.
//
// The --replica-count flag (default 2) selects the deployment replica count.
func appMie() *cobra.Command {
	var replica int

	mieCmd := &cobra.Command{
		Use:   "mie",
		Short: "Make Mie App",
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().AppMie(cmd.Context(), replica)
		},
	}
	mieCmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")

	return mieCmd
}
// appOEM builds the "oem" subcommand, which renders the OEM app deployment
// artifacts through the maker controller.
//
// Flags:
//   --replica-count  deployment replica count (default 2)
//   --vendor         OEM vendor bundle to build (default "standard")
func appOEM() *cobra.Command {
	var (
		replica int
		vendor  string
	)

	oemCmd := &cobra.Command{
		Use:   "oem",
		Short: "Make OEM App",
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().AppOEM(cmd.Context(), replica, vendor)
		},
	}
	oemCmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")
	oemCmd.Flags().StringVar(&vendor, "vendor", "standard", "Vendor name")

	return oemCmd
}

View File

@@ -1,21 +0,0 @@
package makecmd
import (
"github.com/spf13/cobra"
"yizhisec.com/hsv2/forge/internal/controller/maker"
)
// Debs returns the "debs" command (alias "deb"), which builds all Debian
// packages required by the project through the maker controller.
func Debs() *cobra.Command {
	debsCmd := &cobra.Command{
		Use:     "debs",
		Aliases: []string{"deb"},
		Short:   "Build Debian packages",
		Long:    `Build all required Debian packages for the project.`,
		RunE: func(cmd *cobra.Command, args []string) error {
			return maker.NewMaker().Deb(cmd.Context())
		},
	}
	return debsCmd
}

View File

@@ -5,21 +5,15 @@ import (
"yizhisec.com/hsv2/forge/internal/controller/maker" "yizhisec.com/hsv2/forge/internal/controller/maker"
) )
func appUser() *cobra.Command { func Proxy() *cobra.Command {
var (
replica int
)
_cmd := &cobra.Command{ _cmd := &cobra.Command{
Use: "user", Use: "proxy",
Short: "Make User App", Short: "Make Proxy(by caddy)",
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
mk := maker.NewMaker() mk := maker.NewMaker()
return mk.AppUser(cmd.Context(), replica) return mk.Proxy(cmd.Context())
}, },
} }
_cmd.Flags().IntVar(&replica, "replica-count", 2, "Replica count")
return _cmd return _cmd
} }

View File

@@ -0,0 +1,26 @@
package makecmd
import (
"github.com/spf13/cobra"
"yizhisec.com/hsv2/forge/internal/controller/maker"
)
// Seafile returns the "seafile" command, which prepares the seafile
// dependency through the maker controller.
func Seafile() *cobra.Command {
var (
// storage holds the --storage-size flag value (GB).
// NOTE(review): parsed but never passed to mk.Seafile below — the flag is
// currently a no-op; confirm whether maker.Seafile should accept it.
storage int
)
_cmd := &cobra.Command{
Use: "seafile",
Short: "make seafile dependency",
RunE: func(cmd *cobra.Command, args []string) error {
mk := maker.NewMaker()
return mk.Seafile(cmd.Context())
},
}
_cmd.Flags().IntVar(&storage, "storage-size", 50, "指定 seafile 空间大小(单位GB)")
return _cmd
}

View File

@@ -0,0 +1,246 @@
package maker
import (
"context"
"fmt"
"os"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/resource"
)
// AppClient renders the client application's deployment artifacts
// (deployment.yaml, config.yml, upsert.sh) under <make-dir>/app/client.
//
// replica values below 1 are clamped to 1; ctx is currently unused.
// NOTE(review): the embedded config carries hard-coded DB credentials,
// tokens, and base64-encoded starlark pipeline scripts; its YAML
// indentation could not be verified from this diff rendering.
func (m *maker) AppClient(ctx context.Context, replica int) error {
const (
// _config is written verbatim to config.yml for client_server.
_config = `Version: "3"
APNs: /yizhisec/hs_nginx/data/443/oem/data.json
BackupSeafile:
Host: hs-resource-server
Port: 19980
ClientDir:
CompatibleAppFile: /yizhisec/hs_nginx/resource/compatible_apps.csv
OEMFilePath: /yizhisec/hs_nginx/data/443/oem/data.json
StorageDir: /data/storage/client_pkg
WindowsClient:
App7zDir: app7z_0
Dir: windows
Database:
Elastic:
Address: http://es-service.db-es:9200
IPDBFile: /etc/client_server/ipv4.ipdb
Mysql:
Address: mysql.db-mysql:3306
DBName: mie
Password: L0hMysql.
UserName: root
Redis:
Address: redis-master.db-redis:6379
Password: HybridScope0xRed1s.
DisabledFeatureFilePath: /etc/yizhisec/disabled_features
ExportWithBlindWatermark: 1
ExternalOA:
Host: ""
Port: 0
ExternalOASecret:
HsID: ""
HsSecret: ""
GatewayLinkPort: 23209
Key:
Token: TtKVnSzEHO3jRv/GWg3f5k3H1OVfMnPZ1Ke9E6MSCXk=
LicensePubKey: /etc/yizhisec/license/pub_key
Log:
Dir: ./log
Level: 1
Name: client_server
MQTTServer:
Host: emqx-service.db-emqx
Port: 1883
NginxEnvFilePath: /yizhisec/hs_nginx/.env
Pipelines:
- processor:
Script: diA9IGpzb24uZGVjb2RlKGV2ZW50KQpjID0ganNvbi5kZWNvZGUoR2V0UmVzb3VyY2VDb25maWcoImNvbnRyb2xsZXIiKSkKY3B1dCA9IGNbImhhcmR3YXJlX3Jlc291cmNlX3RocmVob2xkIl1bImNwdSJdCm1lbXQgPSBjWyJoYXJkd2FyZV9yZXNvdXJjZV90aHJlaG9sZCJdWyJtZW1vcnkiXQpkaXNrdCA9IGNbImhhcmR3YXJlX3Jlc291cmNlX3RocmVob2xkIl1bImRpc2siXQpzd2l0Y2ggPSBjWyJzd2l0Y2giXQpkZWJ1Z19pbmZvID0ge30KZGVmIGNoZWNrKGNwdSwgbWVtb3J5LCBkaXNrKToKICBpZiBzd2l0Y2ggPT0gRmFsc2U6CiAgICByZXR1cm4KICBtc2cgPSAiIgogIGV2dCA9IHt9CiAgaWYgY3B1ID4gY3B1dDoKICAgICAgbXNnID0gIkNQVeWNoOeUqOi2hei/hyIgKyBzdHIoY3B1dCkgKyAiJSIKICBpZiBtZW1vcnkgPiBtZW10OgogICAgaWYgbGVuKG1zZykgIT0gMDoKICAgICAgbXNnID0gbXNnICsgIu+8jCAiCiAgICBtc2cgPSBtc2cgKyAi5YaF5a2Y5Y2g55So6LaF6L+HIiArIHN0cihtZW10KSArICIlIgogIGlmIGRpc2sgPiBkaXNrdDoKICAgIGlmIGxlbihtc2cpICE9IDA6CiAgICAgIG1zZyA9IG1zZyArICLvvIwgIgogICAgbXNnID0gbXNnICsgIuejgeebmOWNoOeUqOi2hei/hyIgKyBzdHIoZGlza3QpICsgIiUiCiAgaWYgbGVuKG1zZykgIT0gMDoKICAgIG1zZyA9IG1zZyArICLjgILor7flj4rml7bmn6XnnIvmnI3liqHlmajnmoTkvb/nlKjmg4XlhrXmiJbph43mlrDosIPmlbTpmIjlgLzjgIIiCiAgaWYgbGVuKG1zZykgPT0gMDoKICAgIHJldHVybiBldnQKICBpZ25vcmUgPSBDb21wYXJlQW5kU2V0KCJfcGlwZWxpbmVfbHN0X2N0IiwgR2V0VGltZVNlYygpLCA2MCo2MCkKICBpZiBpZ25vcmUgPT0gMDoKICAgIGRlYnVnX2luZm9bImluZm8iXSA9ICJyYXRlIGxpbWl0IGlnbm9yZSIKICAgIHJldHVybgogIG1zZyA9ICLnrZbnlaXmjqfliLblmajnmoQiICsgbXNnCiAgZXZ0WyJkZXRhaWwiXSA9IG1zZwogIGV2dFsiZG9tYWluX2lkIl0gPSAwCiAgZXZ0WyJtZXNzYWdlX3R5cGUiXSA9IDEwMQogIGV2dFsiY3JlYXRlX3RpbWUiXSA9IEdldFRpbWVTZWMoKQogIHJldHVybiBldnQKCm91dHB1dCA9IGpzb24uZW5jb2RlKGNoZWNrKHZbImNwdSJdWyJwZXJjZW50Il0sIHZbIm1lbSJdWyJwZXJjZW50Il0sIHZbImRpc2siXVsicGVyY2VudCJdKSk=
Type: starlark
sink:
Cols:
- create_time
- detail
- domain_id
- message_type
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: message_content
Type: mysql_sink
User: root
source:
Host: redis-master.db-redis:6379
Key: evt_server_state:controller
Pwd: HybridScope0xRed1s.
Tick: 3
Type: redis_source
- processor:
Script: diA9IGpzb24uZGVjb2RlKGV2ZW50KQpjID0ganNvbi5kZWNvZGUoR2V0UmVzb3VyY2VDb25maWcoImdhdGV3YXkiKSkKY3B1dCA9IGNbImhhcmR3YXJlX3Jlc291cmNlX3RocmVob2xkIl1bImNwdSJdCm1lbXQgPSBjWyJoYXJkd2FyZV9yZXNvdXJjZV90aHJlaG9sZCJdWyJtZW1vcnkiXQpkaXNrdCA9IGNbImhhcmR3YXJlX3Jlc291cmNlX3RocmVob2xkIl1bImRpc2siXQpzd2l0Y2ggPSBjWyJzd2l0Y2giXQpkZWJ1Z19pbmZvID0ge30KZGVmIGNoZWNrKCk6CiAgaWYgc3dpdGNoID09IEZhbHNlOgogICAgcmV0dXJuCiAga3MgPSB2LmtleXMoKQogIGlmIGxlbihrcykgPiAwOgogICAgayA9IGtzWzBdCiAgY3B1ID0gdltrXVsiY3B1Il0KICBtZW0gPSB2W2tdWyJtZW0iXQogIGRpc2sgPSB2W2tdWyJkaXNrIl0KICBtc2cgPSAiIgogIGV2dCA9IHt9CiAgaWYgY3B1ID4gY3B1dDoKICAgICAgbXNnID0gIkNQVeWNoOeUqOi2hei/hyIgKyBzdHIoY3B1dCkgKyAiJSIKICBpZiBtZW0gPiBtZW10OgogICAgaWYgbGVuKG1zZykgIT0gMDoKICAgICAgbXNnID0gbXNnICsgIu+8jCAiCiAgICBtc2cgPSBtc2cgKyAi5YaF5a2Y5Y2g55So6LaF6L+HIiArIHN0cihtZW10KSArICIlIgogIGlmIGRpc2sgPiBkaXNrdDoKICAgIGlmIGxlbihtc2cpICE9IDA6CiAgICAgIG1zZyA9IG1zZyArICLvvIwgIgogICAgbXNnID0gbXNnICsgIuejgeebmOWNoOeUqOi2hei/hyIgKyBzdHIoZGlza3QpICsgIiUiCiAgaWYgbGVuKG1zZykgIT0gMDoKICAgIG1zZyA9IG1zZyArICLjgILor7flj4rml7bmn6XnnIvmnI3liqHlmajnmoTkvb/nlKjmg4XlhrXmiJbph43mlrDosIPmlbTpmIjlgLzjgIIiCiAgaWYgbGVuKG1zZykgPT0gMDoKICAgIHJldHVybiBldnQKICBpZ25vcmUgPSBDb21wYXJlQW5kU2V0KCJfcGlwZWxpbmVfbHN0X2d0IiwgR2V0VGltZVNlYygpLCA2MCo2MCkKICBpZiBpZ25vcmUgPT0gMDoKICAgIGRlYnVnX2luZm9bImluZm8iXSA9ICJyYXRlIGxpbWl0IGlnbm9yZSIKICAgIHJldHVybgogIG1zZyA9ICLnvZHlhbPnmoQiICsgbXNnCiAgZXZ0WyJkZXRhaWwiXSA9IG1zZwogIGV2dFsiZG9tYWluX2lkIl0gPSAwCiAgZXZ0WyJtZXNzYWdlX3R5cGUiXSA9IDEwMgogIGV2dFsiY3JlYXRlX3RpbWUiXSA9IEdldFRpbWVTZWMoKQogIHJldHVybiBldnQKCm91dHB1dCA9IGpzb24uZW5jb2RlKGNoZWNrKCkp
Type: starlark
sink:
Cols:
- create_time
- detail
- domain_id
- message_type
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: message_content
Type: mysql_sink
User: root
source:
Host: redis-master.db-redis:6379
Key: evt_server_state:gateway
Pwd: HybridScope0xRed1s.
Tick: 3
Type: redis_source
- processor:
Script: diA9IGpzb24uZGVjb2RlKGV2ZW50KQpjID0ganNvbi5kZWNvZGUoR2V0TGljZW5zZUNvbmZpZygibGljZW5zZSIpKQpsZWZ0ID0gY1sibGljZW5zZV90aHJlaG9sZCJdWyJyZW1haW5pbmdfZGF5Il0Kc3dpdGNoID0gY1sic3dpdGNoIl0KZGVidWdfaW5mbyA9IHt9CmRlZiBjaGVjaygpOgogIGlmIHN3aXRjaCA9PSBGYWxzZToKICAgIHJldHVybgogIAogIGV0ID0gdlsiZXhwaXJlX3RpbWUiXQogIGV2dCA9IHt9CiAgY3VyID0gR2V0VGltZVNlYygpCiAgaWYgZXQgPCAoY3VyICsgbGVmdCAqIDg2NDAwKToKICAgIGlnbm9yZSA9IENvbXBhcmVBbmRTZXQoIl9waXBlbGluZV9sc3RfbCIsIEdldFRpbWVTZWMoKSwgNjAqNjAqMjQpCiAgICBpZiBpZ25vcmUgPT0gMDoKICAgICAgZGVidWdfaW5mb1siaW5mbyJdID0gInJhdGUgbGltaXQgaWdub3JlIgogICAgICByZXR1cm4KICAgIGV2dFsiZGV0YWlsIl0gPSAi5Lqn5ZOB5o6I5p2D5Ymp5L2Z5aSp5pWw5bCP5LqOIiArIHN0cihsZWZ0KSArICLlpKnjgILor7flj4rml7bmn6XnnIvkvb/nlKjmg4XlhrXjgIHph43mlrDosIPmlbTpmIjlgLzmiJbogZTns7vnrqHnkIblkZjmt7vliqDmjojmnYPjgIIiCiAgICBldnRbImRvbWFpbl9pZCJdID0gMAogICAgZXZ0WyJtZXNzYWdlX3R5cGUiXSA9IDIwMgogICAgZXZ0WyJjcmVhdGVfdGltZSJdID0gR2V0VGltZVNlYygpCiAgICByZXR1cm4gZXZ0Cm91dHB1dCA9IGpzb24uZW5jb2RlKGNoZWNrKCkp
Type: starlark
sink:
Cols:
- create_time
- detail
- domain_id
- message_type
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: message_content
Type: mysql_sink
User: root
source:
Host: redis-master.db-redis:6379
Key: license_state_cache:expire
Pwd: HybridScope0xRed1s.
Tick: 3
Type: redis_source
- processor:
Script: diA9IGpzb24uZGVjb2RlKGV2ZW50KQpjID0ganNvbi5kZWNvZGUoR2V0TGljZW5zZUNvbmZpZygibGljZW5zZV9kZXZpY2UiKSkKY2wgPSBjWyJsaWNlbnNlX2RldmljZV90aHJlaG9sZCJdWyJjbGllbnRfbGVmdCJdCm1sID0gY1sibGljZW5zZV9kZXZpY2VfdGhyZWhvbGQiXVsibW9iaWxlX2xlZnQiXQptYyA9IHZbIm1heF9jbGllbnRfY291bnQiXQptYiA9IHZbIm1heF9tb2JpbGVfY2xpZW50X2NvdW50Il0KY2MgPSB2WyJjdXJyZW50X2NsaWVudF9jb3VudCJdCmNtID0gdlsiY3VycmVudF9tb2JpbGVfY2xpZW50X2NvdW50Il0Kc3dpdGNoID0gY1sic3dpdGNoIl0KZGVidWdfaW5mbyA9IHt9CmRlZiBjaGVjaygpOgogIGlmIHN3aXRjaCA9PSBGYWxzZToKICAgIHJldHVybgogIGV2dCA9IHt9CiAgbXNnID0gIiIKICBpZiAobWMtY2MpIDwgY2w6CiAgICBtc2cgPSBtc2cgKyAi5a6i5oi356uv5L2/55So5pWw6YeP5bCR5LqOIitzdHIoY2wpKyLkuKoiCiAgaWYgbGVuKG1zZykgPiAwOgogICAgbXNnID0gbXNnICsgIu+8jCIKICBpZiAobWItY20pIDwgbWw6CiAgICBtc2cgPSBtc2cgKyAi56e75Yqo56uv5L2/55So5pWw6YeP5bCR5LqOIitzdHIobWwpKyLkuKrjgIIiCiAgaWYgbGVuKG1zZykgPiAwOgogICAgaWdub3JlID0gQ29tcGFyZUFuZFNldCgiX3BpcGVsaW5lX2xzdF9sZCIsIEdldFRpbWVTZWMoKSwgNjAqNjApCiAgICBpZiBpZ25vcmUgPT0gMDoKICAgICAgZGVidWdfaW5mb1siaW5mbyJdID0gInJhdGUgbGltaXQgaWdub3JlIgogICAgICByZXR1cm4KICAgIGV2dFsiZGV0YWlsIl0gPSBtc2cgKyAi6K+35Y+K5pe25p+l55yL5L2/55So5oOF5Ya144CB6YeN5paw6LCD5pW06ZiI5YC85oiW6IGU57O7566h55CG5ZGY5re75Yqg5o6I5p2D44CCIgogICAgZXZ0WyJkb21haW5faWQiXSA9IDAKICAgIGV2dFsibWVzc2FnZV90eXBlIl0gPSAyMDIKICAgIGV2dFsiY3JlYXRlX3RpbWUiXSA9IEdldFRpbWVTZWMoKQogICAgcmV0dXJuIGV2dApvdXRwdXQgPSBqc29uLmVuY29kZShjaGVjaygpKQ==
Type: starlark
sink:
Cols:
- create_time
- detail
- domain_id
- message_type
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: message_content
Type: mysql_sink
User: root
source:
Host: redis-master.db-redis:6379
Key: license_state_cache:online
Pwd: HybridScope0xRed1s.
Tick: 3
Type: redis_source
- processor:
Script: diA9IGpzb24uZGVjb2RlKGV2ZW50KQpkZWJ1Z19pbmZvID0ge30KZGVmIGNoZWNrKCk6CiAgZXZ0ID0ge30KICBtc2cgPSAiIgogIAogIGRpZCA9IHZbImRvbWFpbl9pZCJdCiAgdWlkID0gdlsidXNlcl9pZCJdCiAgdW5hbWUgPSBHZXRVc2VyTmFtZSh1aWQpCiAgbGltaXQgPSB2WyJsaW1pdCJdCiAgcmFuZ2UgPSB2WyJyYW5nZSJdCiAgcnUgPSB2WyJyYW5nZV91bml0Il0KICBzdSA9IHZbInNpemVfdW5pdCJdCiAgZSA9IHZbImV2dCJdCiAgaWYgZSA9PSAiY291bnQiOgogICAgZXZ0WyJtZXNzYWdlX3R5cGUiXSA9IDMwMQogICAgdW5pdCA9IHN0cihyYW5nZSkKICAgIGlmIHJ1ID09ICJIIiBvciBydSA9PSAiaCI6CiAgICAgICAgdW5pdCA9IHVuaXQgKyAi5bCP5pe2IgogICAgaWYgcnUgPT0gIkQiIG9yIHJ1ID09ICJkIjoKICAgICAgICB1bml0ID0gdW5pdCArICLlpKkiCiAgICBtc2cgPSAi5ZyoIiArIHVuaXQgKyAi5YaF77yMIiArIHVuYW1lICsgIueahOWfn+WGheaWh+S7tuWvvOWHuuaVsOmHj+i2hei/hyIgKyBzdHIobGltaXQpICsgIuS4qu+8jCIKICBpZiBlID09ICJzaXplIjoKICAgIGV2dFsibWVzc2FnZV90eXBlIl0gPSAzMDIKICAgIHVuaXQgPSBzdHIocmFuZ2UpCiAgICBpZiBydSA9PSAiSCIgb3IgcnUgPT0gImgiOgogICAgICAgIHVuaXQgPSB1bml0ICsgIuWwj+aXtiIKICAgIGlmIHJ1ID09ICJEIiBvciBydSA9PSAiZCI6CiAgICAgICAgdW5pdCA9IHVuaXQgKyAi5aSpIgogICAgbXNnID0gIuWcqCIgKyB1bml0ICsgIuWGhe+8jCIgKyB1bmFtZSArICLnmoTln5/lhoXmlofku7blr7zlh7rlpKflsI/otoXov4ciICsgc3RyKGxpbWl0KQogICAgaWYgc3UgPT0gIk0iIG9yIHN1ID09ICJtIjoKICAgICAgICBtc2cgPSBtc2cgKyAiTULvvIwiCiAgICBpZiBzdSA9PSAiRyIgb3Igc3UgPT0gImciOgogICAgICAgIG1zZyA9IG1zZyArICJHQu+8jCIKICAgIGlmIHN1ID09ICJUIiBvciBzdSA9PSAidCI6CiAgICAgICAgbXNnID0gbXNnICsgIlRC77yMIgoKICBpZiBsZW4obXNnKSA+IDA6CiAgICBldnRbImRldGFpbCJdID0gbXNnICsgIuivt+WPiuaXtui3n+i4quWvvOWHuuaWh+S7tueahOaVj+aEn+eoi+W6puaIlumHjeaWsOiwg+aVtOmYiOWAvOOAgiIKICAgIGV2dFsiZG9tYWluX2lkIl0gPSBkaWQKICAgIGV2dFsiY3JlYXRlX3RpbWUiXSA9IEdldFRpbWVTZWMoKQogICAgcmV0dXJuIGV2dApvdXRwdXQgPSBqc29uLmVuY29kZShjaGVjaygpKQ==
Type: starlark
sink:
Cols:
- create_time
- detail
- domain_id
- message_type
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: message_content
Type: mysql_sink
User: root
source:
DB: mie
Host: mysql.db-mysql:3306
Pwd: L0hMysql.
Table: evt_export_file_over
Type: mysql_source
User: root
Sentry:
TracesSampleRate: 1
StaticURLPathPrefix:
NetworkAppIcon: /user/avatar
Storage:
PublicFolderFileDir: /data/storage/public_folder_file
TmpDir: /data/storage/tmp
UploadedFilesDir: /data/storage/uploaded_files
TranslationPath: translation.csv
UpgradeCheckFilePath: /yizhisec/hs_nginx/resource/release_version_record.csv
UserManagement:
Host: user-service
Port: 9013
WatermarkServer:
Host: hs-watermark
Port: 9014
Web:
Host: 0.0.0.0
Mode: release
Port: 9129
Web2:
Host: 0.0.0.0
Mode: release
Port: 9024
WebMessages:
Host: 0.0.0.0
Mode: release
Port: 9025
WorkDir: /yizhisec/client_server
YosGuard:
Host: 172.17.0.1
Port: 7788`
// _upsert refreshes the configmap and restarts the client deployment.
_upsert = `#!/bin/bash
kubectl create configmap config-client --namespace hsv2 --from-file=config.yml=./config.yml --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f deployment.yaml
kubectl rollout restart deployment client-deployment -n hsv2`
)
var (
err error
// workdir is <make-dir>/app/client; all artifacts land here.
workdir = filepath.Join(opt.Cfg.Make.Dir, "app", "client")
)
logger.Info("☑️ maker.AppClient: 开始构建 client 应用..., dir = %s", workdir)
logger.Debug("☑️ maker.AppClient: 开始创建工作目录 = %s", workdir)
if err = os.MkdirAll(workdir, 0755); err != nil {
logger.Debug("❌ maker.AppClient: 创建目录失败: %v", err)
return err
}
logger.Debug("✅ maker.AppClient: 创建工作目录成功 = %s", workdir)
// Clamp to at least one replica.
if replica < 1 {
replica = 1
}
logger.Debug("☑️ maker.AppClient: 开始构建 yaml 资源文件")
// resource.YAMLAppClient is a format string taking the replica count.
content := []byte(fmt.Sprintf(resource.YAMLAppClient, replica))
if err = os.WriteFile(filepath.Join(workdir, "deployment.yaml"), content, 0644); err != nil {
logger.Debug("❌ maker.AppClient: 写入 deployment.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppClient: 开始 yaml 资源文件成功")
logger.Debug("☑️ maker.AppClient: 开始构建 config 文件")
if err = os.WriteFile(filepath.Join(workdir, "config.yml"), []byte(_config), 0644); err != nil {
logger.Debug("❌ maker.AppClient: 写入 config.yml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppClient: 构建 config 文件成功")
logger.Debug("☑️ maker.AppClient: 开始构建 upsert.sh 脚本")
// 0755 so the helper script is directly executable.
if err = os.WriteFile(filepath.Join(workdir, "upsert.sh"), []byte(_upsert), 0755); err != nil {
logger.Debug("❌ maker.AppClient: 写入 upsert.sh 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppClient: 构建 upsert.sh 脚本成功")
logger.Info("✅ maker.AppClient: 构建 client 应用成功!!!")
return nil
}

View File

@@ -0,0 +1,99 @@
package maker
import (
"context"
"fmt"
"os"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/resource"
)
// AppGateway renders the gateway application's deployment artifacts
// (deployment.yaml, config.yml, upsert.sh) under <make-dir>/app/gateway.
//
// replica values below 1 are clamped to 1; ctx is currently unused.
// NOTE(review): the embedded config carries hard-coded DB credentials and
// a token; its YAML indentation could not be verified from this rendering.
func (m *maker) AppGateway(ctx context.Context, replica int) error {
const (
// _config is written verbatim to config.yml for gateway_controller.
_config = `Version: "3"
Database:
Elastic:
Address: http://es-service.db-es:9200
Mysql:
Address: mysql.db-mysql:3306
DBName: mie
Password: L0hMysql.
UserName: root
Redis:
Address: redis-master.db-redis:6379
Password: HybridScope0xRed1s.
Gateway:
Cert:
ClientCrt: /yizhisec/ssl/client.crt
ClientKey: /yizhisec/ssl/client.key
TokenFilePath: /etc/yizhisec/token
Key:
Token: TtKVnSzEHO3jRv/GWg3f5k3H1OVfMnPZ1Ke9E6MSCXk=
Log:
Dir: ./log
Level: 1
Name: gateway_controller
Sentry:
TracesSampleRate: 1
UserManagement:
Host: user-service
Port: 9013
Web:
Host: 0.0.0.0
Mode: release
Port: 9012
WorkDir: /yizhisec/gateway_controller/workspace
YosGuard:
Host: 172.17.0.1
Port: 7788`
// _upsert refreshes the configmap and restarts the gateway deployment.
_upsert = `#!/bin/bash
kubectl create configmap config-gateway --namespace hsv2 --from-file=config.yml=./config.yml --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f deployment.yaml
kubectl rollout restart deployment gateway-deployment -n hsv2`
)
var (
err error
// workdir is <make-dir>/app/gateway; all artifacts land here.
workdir = filepath.Join(opt.Cfg.Make.Dir, "app", "gateway")
)
logger.Info("☑️ maker.AppGateway: 开始构建 gateway 应用..., dir = %s", workdir)
logger.Debug("☑️ maker.AppGateway: 开始创建工作目录 = %s", workdir)
if err = os.MkdirAll(workdir, 0755); err != nil {
logger.Debug("❌ maker.AppGateway: 创建目录失败: %v", err)
return err
}
logger.Debug("✅ maker.AppGateway: 创建工作目录成功 = %s", workdir)
// Clamp to at least one replica.
if replica < 1 {
replica = 1
}
logger.Debug("☑️ maker.AppGateway: 开始构建 yaml 资源文件")
// resource.YAMLAppGateway is a format string taking the replica count.
content := []byte(fmt.Sprintf(resource.YAMLAppGateway, replica))
if err = os.WriteFile(filepath.Join(workdir, "deployment.yaml"), content, 0644); err != nil {
logger.Debug("❌ maker.AppGateway: 写入 deployment.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppGateway: 开始 yaml 资源文件成功")
logger.Debug("☑️ maker.AppGateway: 开始构建 config 文件")
if err = os.WriteFile(filepath.Join(workdir, "config.yml"), []byte(_config), 0644); err != nil {
logger.Debug("❌ maker.AppGateway: 写入 config.yml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppGateway: 构建 config 文件成功")
logger.Debug("☑️ maker.AppGateway: 开始构建 upsert.sh 脚本")
// 0755 so the helper script is directly executable.
if err = os.WriteFile(filepath.Join(workdir, "upsert.sh"), []byte(_upsert), 0755); err != nil {
logger.Debug("❌ maker.AppGateway: 写入 upsert.sh 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppGateway: 构建 upsert.sh 脚本成功")
logger.Info("✅ maker.AppGateway: 构建 gateway 应用成功!!!")
return nil
}

View File

@@ -0,0 +1,197 @@
package maker
import (
"context"
"fmt"
"os"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/resource"
)
// AppMie renders the mie application's artifacts under <make-dir>/app/mie:
// conf.yml plus four deployment manifests (api, sweeper, worker, cron)
// and an upsert.sh helper script.
//
// replica (clamped to >= 1) is applied only to the api deployment; the
// sweeper/worker/cron manifests are written verbatim from resource.
// ctx is currently unused.
// NOTE(review): the embedded config carries hard-coded credentials and a
// secret key; its YAML indentation could not be verified from this rendering.
func (m *maker) AppMie(ctx context.Context, replica int) error {
const (
// _config is written verbatim to conf.yml for the mie server.
_config = `Version: "3"
BackupSeafile:
Host: hs-resource-server
Port: 19980
account_manager:
address: http://user-service:9013
client_server:
msg: http://client-service:9025
api: http://client-service:9024
web: http://client-service:9129
backend_queue_names:
request: request_que
web: web_que
backup_database_server:
host: hs-backup-server
port: 9349
backup_seafile_server:
host: hs-resource-server
port: 19980
clientPKG:
android:
client_pkg_dir: /data/storage/client_pkg/android
client_pkg_file_path: /data/storage/client_pkg/android/SecureApplication-Client-Android.apk
client_pkg_name: SecureApplication-Client-Android.apk
client_version_file_path: /data/storage/client_pkg/android/android_version.json
dir: /data/storage/client_pkg
ios:
client_pkg_dir: /data/storage/client_pkg/ios
client_pkg_file_path: ''
client_pkg_name: ''
client_version_file_path: /data/storage/client_pkg/ios/ios_version.json
linux:
client_pkg_dir: /data/storage/client_pkg/linux
client_pkg_file_path: /data/storage/client_pkg/linux/hscore-ubuntu-22.04-amd64.deb
client_pkg_name: hscore-ubuntu-22.04-amd64.deb
client_version_file_path: /data/storage/client_pkg/linux/linux_version.json
mac:
client_pkg_beta_file_path: /yizhisec/hs_nginx/resource/hybridscope-client-mac-beta.pkg
client_pkg_beta_name: hybridscope-client-mac-beta.pkg
client_pkg_dir: /data/storage/client_pkg/mac
client_pkg_file_path: /yizhisec/hs_nginx/resource/hybridscope-client-mac.pkg
client_pkg_name: hybridscope-client-mac.pkg
client_version_file_path: /data/storage/client_pkg/mac/mac_version.json
oem_dir: /yizhisec/hs_nginx/data/443/oem
oem_file_path: /yizhisec/hs_nginx/data/443/oem/data.json
windows:
client_main_zip_name: app.7z
client_pkg_cfg_file_name: login.conf
client_pkg_dir: /data/storage/client_pkg/windows
client_pkg_unzip_dir: package
client_pkg_zip: /data/storage/client_pkg/windows/dsclient.zip
client_zip_version: version
databases:
elasticsearch:
host: es-service.db-es
port: 9200
ipdb:
path: /etc/mie-server/ipdb/ip.ipdb
mysql:
db: mie
host: mysql-cluster-mysql-master.db-mysql
password: L0hMysql.
port: 3306
username: root
redis:
host: redis-master.db-redis
password: HybridScope0xRed1s.
port: 6379
username: ''
exe_root_license:
path: /etc/mie-server/root.pem
gateway_service:
host: gateway-service.hsv2
port: 9012
host: 0.0.0.0
license:
version: 3
license_init_conf: /etc/mie-server/server_license_init.conf
public_key: /etc/mie-server/license/pub_key
log_level: 20
mqtt_server:
host: emqx-service.db-emqx
port: 1883
port: 9002
resource_server:
address: http://hs-resource-server:19980
secret_key: i345piuh48776lkjsdhfsdfljho
sentry_dsn: null
static_urlpath_prefix:
network_app_icon: /user/avatar
storage:
avatar_dir: /data/storage/avatar
mobile_app_dir: /yizhisec/hs_nginx
network_app_icon: network_app
patch_dir: /data/storage/patch
public_folder_file_dir: /data/storage/public_folder_file
share_file_storage: /data/storage/share_file
sync_avatar_dir: sync
tmp_dir: /data/storage/tmp
upload_avatar_dir: local
uploaded_files: /data/storage/uploaded_files
token_key: TtKVnSzEHO3jRv/GWg3f5k3H1OVfMnPZ1Ke9E6MSCXk=
translation_path: /etc/mie-server/translation.csv
yosguard_service:
host: 172.17.0.1
port: 7788
ElinkLogin: true
export_with_blind_watermark: true`
// _upsert refreshes the configmap, applies all four deployments and
// restarts only the api deployment.
_upsert = `#!/bin/bash
kubectl create configmap config-api --namespace hsv2 --from-file=conf.yml=./conf.yml --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f deployment-api.yaml
kubectl apply -f deployment-sweeper.yaml
kubectl apply -f deployment-worker.yaml
kubectl apply -f deployment-cron.yaml
kubectl rollout restart deployment api-deployment -n hsv2`
)
var (
err error
// workdir is <make-dir>/app/mie; all artifacts land here.
workdir = filepath.Join(opt.Cfg.Make.Dir, "app", "mie")
)
logger.Info("☑️ maker.AppMie: 开始构建 mie ... workdir = %s", workdir)
logger.Debug("☑️ maker.AppMie: 开始创建工作目录 = %s", workdir)
if err = os.MkdirAll(workdir, 0755); err != nil {
logger.Debug("❌ maker.AppMie: 创建目录失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 创建工作目录成功 = %s", workdir)
// Clamp to at least one replica.
if replica < 1 {
replica = 1
}
logger.Debug("☑️ maker.AppMie: 写入 conf.yml 文件..., dest = %s", filepath.Join(workdir, "conf.yml"))
if err = os.WriteFile(filepath.Join(workdir, "conf.yml"), []byte(_config), 0644); err != nil {
logger.Debug("❌ maker.AppMie: 写入 conf.yml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 conf.yml 文件成功, dest = %s", filepath.Join(workdir, "conf.yml"))
logger.Debug("☑️ maker.AppMie: 写入 deployment-api.yaml 文件..., dest = %s", filepath.Join(workdir, "deployment-api.yaml"))
// resource.YAMLAppMieAPI is a format string taking the replica count.
apiYaml := []byte(fmt.Sprintf(resource.YAMLAppMieAPI, replica))
if err = os.WriteFile(filepath.Join(workdir, "deployment-api.yaml"), apiYaml, 0644); err != nil {
logger.Debug("❌ maker.AppMie: 写入 deployment-api.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 deployment-api.yaml 文件成功, dest = %s", filepath.Join(workdir, "deployment-api.yaml"))
logger.Debug("☑️ maker.AppMie: 写入 deployment-sweeper.yaml 文件..., dest = %s", filepath.Join(workdir, "deployment-sweeper.yaml"))
if err = os.WriteFile(filepath.Join(workdir, "deployment-sweeper.yaml"), resource.YAMLAppMieSweeper, 0644); err != nil {
logger.Debug("❌ maker.AppMie: 写入 deployment-sweeper.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 deployment-sweeper.yaml 文件成功, dest = %s", filepath.Join(workdir, "deployment-sweeper.yaml"))
logger.Debug("☑️ maker.AppMie: 写入 deployment-worker.yaml 文件..., dest = %s", filepath.Join(workdir, "deployment-worker.yaml"))
if err = os.WriteFile(filepath.Join(workdir, "deployment-worker.yaml"), resource.YAMLAppMieWorker, 0644); err != nil {
logger.Debug("❌ maker.AppMie: 写入 deployment-worker.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 deployment-worker.yaml 文件成功, dest = %s", filepath.Join(workdir, "deployment-worker.yaml"))
logger.Debug("☑️ maker.AppMie: 写入 deployment-cron.yaml 文件..., dest = %s", filepath.Join(workdir, "deployment-cron.yaml"))
if err = os.WriteFile(filepath.Join(workdir, "deployment-cron.yaml"), resource.YAMLAppMieCron, 0644); err != nil {
logger.Debug("❌ maker.AppMie: 写入 deployment-cron.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 deployment-cron.yaml 文件成功, dest = %s", filepath.Join(workdir, "deployment-cron.yaml"))
logger.Debug("☑️ maker.AppMie: 写入 upsert.sh 文件..., dest = %s", filepath.Join(workdir, "upsert.sh"))
// 0755 so the helper script is directly executable.
if err = os.WriteFile(filepath.Join(workdir, "upsert.sh"), []byte(_upsert), 0755); err != nil {
logger.Debug("❌ maker.AppMie: 写入 upsert.sh 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppMie: 写入 upsert.sh 文件成功, dest = %s", filepath.Join(workdir, "upsert.sh"))
logger.Info("✅ maker.AppMie: 构建 mie 成功!!! workdir = %s", workdir)
return nil
}

View File

@@ -0,0 +1,161 @@
package maker
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"github.com/samber/lo"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/archiver"
"yizhisec.com/hsv2/forge/pkg/resource"
)
// AppOEM builds the OEM static-resource docker image and renders its
// deployment manifest under <make-dir>/app/oem.
//
// vendor selects the OEM bundle to download ("standard", "elink", "noah",
// "heishuimeng"); an unknown vendor is rejected with the supported list.
// replica is passed through to the deployment manifest.
//
// Fixes vs. the previous revision:
//   - the download log printed the *Vendor struct pointer where the URL
//     was intended (_vendor -> _vendor.URL)
//   - the final success message read "开始构建 ... 成功" ("start ... succeeded")
func (m *maker) AppOEM(ctx context.Context, replica int, vendor string) error {
	const (
		// _nginx is written verbatim to nginx.conf and serves /data.
		// NOTE(review): `\$uri` keeps a literal backslash inside this Go raw
		// string — confirm nginx actually expects the escaped form here
		// (looks like a leftover from a shell heredoc).
		_nginx = `user root;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 512;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
sendfile on;
keepalive_timeout 65;
client_max_body_size 10M;
server {
listen 80;
root /data;
location / {
try_files \$uri \$uri/ =404;
}
}
}`
		// _dockerfile bakes the extracted oem/ directory into an nginx image.
		_dockerfile = `FROM hub.yizhisec.com/external/nginx:1.29.1-alpine3.22
WORKDIR /data
COPY oem /data/oem
COPY nginx.conf /etc/nginx/nginx.conf
CMD ["nginx", "-g", "daemon off;"]`
		// _image is the tag template; %s is the vendor name.
		_image = "hub.yizhisec.com/hybridscope/v2/oem-%s:latest"
	)
	// Vendor pairs a bundle download URL with the directory name the
	// archive extracts to.
	type Vendor struct {
		URL string
		Dir string
	}
	var (
		vendorURLMap = map[string]*Vendor{
			"standard":    {URL: "https://artifactory.yizhisec.com/artifactory/yizhisec-release/oem/release/2.1.0-std/oem.tar.gz", Dir: "oem"},
			"elink":       {URL: "https://artifactory.yizhisec.com/artifactory/yizhisec-release/oem/release/2.1.0-std/oem_csgElink.tar.gz", Dir: "oem_csgElink"},
			"noah":        {URL: "https://artifactory.yizhisec.com/artifactory/yizhisec-release/oem/release/2.1.0-std/oem_noah.tar.gz", Dir: "oem_noah"},
			"heishuimeng": {URL: "https://artifactory.yizhisec.com/artifactory/yizhisec-release/oem/release/2.1.0-std/oem_heishuimeng.tar.gz", Dir: "oem_heishuimeng"},
		}
		err     error
		_vendor *Vendor
		ok      bool
		workdir = filepath.Join(opt.Cfg.Make.Dir, "app", "oem")
		output  []byte
	)
	logger.Info("☑️ maker.AppOEM: 开始构建 oem[%s], workdir = %s", vendor, workdir)
	if _vendor, ok = vendorURLMap[vendor]; !ok {
		supported := lo.MapToSlice(vendorURLMap, func(key string, _ *Vendor) string {
			return key
		})
		logger.Debug("❌ maker.AppOEM: vendor not supported, 支持的 vendor 有: %v", supported)
		return fmt.Errorf("请检查 vendor 是否正确, 支持的 vendor 有: %v", supported)
	}
	// 1. make workdir
	logger.Debug("☑️ maker.AppOEM: 开始创建 workdir = %s", workdir)
	if err = os.MkdirAll(workdir, 0o755); err != nil {
		return err
	}
	logger.Debug("✅ maker.AppOEM: workdir 创建成功 = %s", workdir)
	// 2. download oem.tar.gz
	logger.Debug("☑️ maker.AppOEM: 开始下载 oem[%s] url = %s", vendor, _vendor.URL)
	if err = archiver.DownloadAndExtract(ctx, _vendor.URL, workdir); err != nil {
		logger.Debug("❌ maker.AppOEM: oem[%s] tar 下载失败, url = %s, err = %v", vendor, _vendor.URL, err)
		return err
	}
	// Normalize the extracted directory name so the Dockerfile's COPY oem
	// works for every vendor.
	if _vendor.Dir != "oem" {
		if err = os.Rename(
			filepath.Join(workdir, _vendor.Dir),
			filepath.Join(workdir, "oem"),
		); err != nil {
			logger.Debug("❌ maker.AppOEM: oem[%s] tar 重命名失败, err = %v", vendor, err)
			return err
		}
	}
	logger.Debug("✅ maker.AppOEM: oem[%s] tar 下载成功", vendor)
	// 3. write nginx.conf
	logger.Debug("☑️ maker.AppOEM: 开始写入 nginx.conf")
	if err = os.WriteFile(
		filepath.Join(workdir, "nginx.conf"),
		[]byte(_nginx),
		0o644,
	); err != nil {
		logger.Debug("❌ maker.AppOEM: nginx.conf 写入失败, err = %v", err)
		return err
	}
	logger.Debug("✅ maker.AppOEM: nginx.conf 写入成功")
	// 4. write Dockerfile
	logger.Debug("☑️ maker.AppOEM: 开始写入 Dockerfile")
	if err = os.WriteFile(
		filepath.Join(workdir, "Dockerfile"),
		[]byte(_dockerfile),
		0o644,
	); err != nil {
		logger.Debug("❌ maker.AppOEM: Dockerfile 写入失败, err = %v", err)
		return err
	}
	logger.Debug("✅ maker.AppOEM: Dockerfile 写入成功")
	// 5. build docker image
	imageName := fmt.Sprintf(_image, vendor)
	logger.Debug("☑️ maker.AppOEM: 开始构建 docker image = %s", imageName)
	// docker build -t <image_name> -f <workdir/Dockerfile> <workdir>
	_cmd := exec.CommandContext(ctx, "docker", "build", "-t", imageName, "-f", filepath.Join(workdir, "Dockerfile"), workdir)
	if output, err = _cmd.CombinedOutput(); err != nil {
		logger.Debug("❌ maker.AppOEM: docker image 构建失败, err = %v, output = %s", err, string(output))
		return err
	}
	logger.Debug("✅ maker.AppOEM: docker image 构建成功, image = %s", imageName)
	// 6. render oem.yaml
	logger.Debug("☑️ maker.AppOEM: 开始渲染 deployment.yaml")
	// resource.YAMLAppOEM takes the replica count and the image tag.
	oemYAML := fmt.Sprintf(resource.YAMLAppOEM, replica, imageName)
	if err = os.WriteFile(
		filepath.Join(workdir, "deployment.yaml"),
		[]byte(oemYAML),
		0o644,
	); err != nil {
		logger.Debug("❌ maker.AppOEM: deployment.yaml 写入失败, err = %v", err)
		return err
	}
	logger.Debug("✅ maker.AppOEM: deployment.yaml 写入成功")
	logger.Info("✅ maker.AppOEM: 构建 oem[%s] 成功!!!", vendor)
	return nil
}

View File

@@ -2,8 +2,128 @@ package maker
import ( import (
"context" "context"
"fmt"
"os"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/resource"
) )
func (m *maker) AppUser(ctx context.Context, replica int) error { func (m *maker) AppUser(ctx context.Context, replica int) error {
const (
_config = `Version: "3"
Database:
Elastic:
Address: http://es-service.db-es:9200
IPDB:
Path: /etc/hs_user_management/ipdb/ip.ipdb
Mysql:
Address: mysql.db-mysql:3306
DBName: mie
Password: L0hMysql.
UserName: root
Redis:
Address: redis-master.db-redis:6379
Password: HybridScope0xRed1s.
DisabledFeatureFilePath: /etc/yizhisec/disabled_features
EnableTenant: false
Key:
Token: TtKVnSzEHO3jRv/GWg3f5k3H1OVfMnPZ1Ke9E6MSCXk=
LicensePubKey: /etc/yizhisec/license/pub_key
Log:
Dir: ./log
Level: 1
Name: hs_user_management
Sentry:
TracesSampleRate: 1
Sso:
DingTalk:
ApiHost: oapi.dingtalk.com
LoginUrl: https://oapi.dingtalk.com/connect/qrconnect
Feishu:
ApiHost: open.feishu.cn
LoginUrl: https://open.feishu.cn/open-apis/authen/v1/index
Proxy:
CallbackHost: hssso.yizhisec.com:33443
Cert:
ClientCrt: /etc/hs_user_management/proxy/certs/client.crt
ClientKey: /etc/hs_user_management/proxy/certs/client.key
ServiceHost: hssso.yizhisec.com:33444
RedirectPath:
BoundFailed: /#/accountSettings/thirdAccount
BoundSuccess: /#/accountSettings/thirdAccount
LoginFailed: /#/thirdError
LoginNeedBoundUser: /#/bind
LoginSuccess: /#/
WorkWeiXin:
ApiHost: qyapi.weixin.qq.com
LoginUrl: https://login.work.weixin.qq.com/wwlogin/sso/login
Storage:
Avatar:
ADSyncDir: ad
Base: /data/storage/avatar
LDAPSyncDir: ldap
LocalDir: local
SyncDir: sync
TranslationPath: translation.csv
Web:
Host: 0.0.0.0
Mode: release
Port: 9013
WorkDir: /yizhisec/hs_user_management/workspace
YosGuard:
Host: 172.17.0.1
Port: 7788
ElinkLogin: false`
_upsert = `#!/bin/bash
kubectl create configmap config-user --namespace hsv2 --from-file=config.yml=./config.yml --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f deployment.yaml
kubectl rollout restart deployment user-deployment -n hsv2`
)
var (
err error
workdir = filepath.Join(opt.Cfg.Make.Dir, "app", "user")
)
logger.Info("☑️ maker.AppUser: 开始构建 user 应用..., dir = %s", workdir)
logger.Debug("☑️ maker.AppUser: 开始创建工作目录 = %s", workdir)
if err = os.MkdirAll(workdir, 0755); err != nil {
logger.Debug("❌ maker.AppUser: 创建目录失败: %v", err)
return err
}
logger.Debug("✅ maker.AppUser: 创建工作目录成功 = %s", workdir)
if replica < 1 {
replica = 1
}
logger.Debug("☑️ maker.AppUser: 开始构建 yaml 资源文件")
content := []byte(fmt.Sprintf(resource.YAMLAppUser, replica))
if err = os.WriteFile(filepath.Join(workdir, "deployment.yaml"), []byte(content), 0644); err != nil {
logger.Debug("❌ maker.AppUser: 写入 deployment.yaml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppUser: 开始 yaml 资源文件成功")
// 写入 config.yml
logger.Debug("☑️ maker.AppUser: 开始构建 config 文件")
if err = os.WriteFile(filepath.Join(workdir, "config.yml"), []byte(_config), 0644); err != nil {
logger.Debug("❌ maker.AppUser: 写入 config.yml 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppUser: 构建 config 文件成功")
logger.Debug("☑️ maker.AppUser: 开始构建 upsert.sh 脚本")
if err = os.WriteFile(filepath.Join(workdir, "upsert.sh"), []byte(_upsert), 0755); err != nil {
logger.Debug("❌ maker.AppUser: 写入 upsert.sh 失败: %v", err)
return err
}
logger.Debug("✅ maker.AppUser: 构建 upsert.sh 脚本成功")
logger.Info("✅ maker.AppUser: 构建 user 应用成功!!!")
return nil return nil
} }

View File

@@ -1,37 +0,0 @@
package maker
import (
"context"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/archiver"
)
// Deb downloads the docker deb bundle and unpacks it into the
// dependency/deb directory below the configured make dir. The archive
// is fetched over TLS with certificate verification disabled and is
// gunzip-decompressed while extracting.
func (m *maker) Deb(ctx context.Context) error {
	const archiveURL = "https://artifactory.yizhisec.com:443/artifactory/filestore/hsv3/deb/docker.tar.gz"

	destDir := filepath.Join(opt.Cfg.Make.Dir, "dependency", "deb")

	logger.Info("☑️ 开始准备 deb(docker) 文件...")
	logger.Debug("下载地址: %s", archiveURL)
	logger.Debug("目标目录: %s", destDir)

	err := archiver.DownloadAndExtract(
		ctx,
		archiveURL,
		destDir,
		archiver.WithInsecureSkipVerify(),
		archiver.WithGzipCompression(true),
	)
	if err != nil {
		logger.Info("❌ 下载并解压 deb(docker) 文件失败")
		logger.Debug("下载并解压 deb(docker) 文件失败: %v", err)
		return err
	}

	logger.Info("✅ 准备 deb(docker) 文件成功!!!")
	return nil
}

View File

@@ -6,7 +6,6 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"regexp"
"gitea.loveuer.com/yizhisec/pkg3/logger" "gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt" "yizhisec.com/hsv2/forge/internal/opt"
@@ -38,7 +37,7 @@ func WithElasticMemRate(memRate int) ElasticOpt {
func WithElasticStorageGi(storage string) ElasticOpt { func WithElasticStorageGi(storage string) ElasticOpt {
return func(o *elasticOpt) { return func(o *elasticOpt) {
if matched, _ := regexp.MatchString(`^\d+(\.\d+)?[EPTGMK]i?$`, storage); matched { if opt.StorageSizeReg.MatchString(storage) {
o.Storage = storage o.Storage = storage
} }
} }

View File

@@ -9,6 +9,7 @@ import (
"gitea.loveuer.com/yizhisec/pkg3/logger" "gitea.loveuer.com/yizhisec/pkg3/logger"
"github.com/samber/lo" "github.com/samber/lo"
"yizhisec.com/hsv2/forge/internal/opt" "yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/model"
) )
type imageOpt struct { type imageOpt struct {
@@ -32,10 +33,12 @@ func WithImageSave(filename string) ImageOpt {
} }
} }
func WithImageForcePull() ImageOpt { func WithImageForcePull(force bool) ImageOpt {
return func(o *imageOpt) { return func(o *imageOpt) {
if force {
o.ForcePull = true o.ForcePull = true
} }
}
} }
func (m *maker) Image(ctx context.Context, name string, opts ...ImageOpt) error { func (m *maker) Image(ctx context.Context, name string, opts ...ImageOpt) error {
@@ -107,14 +110,8 @@ SAVE:
} }
func (m *maker) Images(ctx context.Context) error { func (m *maker) Images(ctx context.Context) error {
type Images struct {
Name string
Fallback string
Save string
Force bool
}
var images = []*Images{ var images = []*model.Image{
{Name: "quay.io/k0sproject/apiserver-network-proxy-agent:v0.32.0", Fallback: "hub.yizhisec.com/external/apiserver-network-proxy-agent:v0.32.0", Save: "k0s.apiserver-network-proxy-agent.tar"}, {Name: "quay.io/k0sproject/apiserver-network-proxy-agent:v0.32.0", Fallback: "hub.yizhisec.com/external/apiserver-network-proxy-agent:v0.32.0", Save: "k0s.apiserver-network-proxy-agent.tar"},
{Name: "quay.io/k0sproject/cni-node:1.7.1-k0s.0", Fallback: "", Save: "k0s.cni-node.tar"}, {Name: "quay.io/k0sproject/cni-node:1.7.1-k0s.0", Fallback: "", Save: "k0s.cni-node.tar"},
{Name: "quay.io/k0sproject/coredns:1.12.2", Fallback: "", Save: "k0s.coredns.tar"}, {Name: "quay.io/k0sproject/coredns:1.12.2", Fallback: "", Save: "k0s.coredns.tar"},
@@ -173,10 +170,7 @@ func (m *maker) Images(ctx context.Context) error {
opts := []ImageOpt{ opts := []ImageOpt{
WithImageFallback(image.Fallback), WithImageFallback(image.Fallback),
WithImageSave(image.Save), WithImageSave(image.Save),
} WithImageForcePull(image.Force),
if image.Force {
opts = append(opts, WithImageForcePull())
} }
logger.Info("☑️ 获取镜像: %s", image.Name) logger.Info("☑️ 获取镜像: %s", image.Name)

View File

@@ -6,7 +6,6 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"gitea.loveuer.com/yizhisec/pkg3/logger" "gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt" "yizhisec.com/hsv2/forge/internal/opt"
@@ -30,7 +29,7 @@ func WithMySQLReplica(replica int) MysqlOpt {
func WithMySQLStorage(storage string) MysqlOpt { func WithMySQLStorage(storage string) MysqlOpt {
return func(o *mysqlOpt) { return func(o *mysqlOpt) {
// validate Kubernetes storage size string (e.g., "50Gi", "100Mi") // validate Kubernetes storage size string (e.g., "50Gi", "100Mi")
if matched, _ := regexp.MatchString(`^\d+(\.\d+)?[EPTGMK]i?$`, storage); matched { if opt.StorageSizeReg.MatchString(storage) {
o.Storage = storage o.Storage = storage
} }
} }

View File

@@ -10,21 +10,23 @@ import (
"yizhisec.com/hsv2/forge/pkg/downloader" "yizhisec.com/hsv2/forge/pkg/downloader"
) )
// make proxy for 8443, 443
// by caddy, managed by systemd
// steps:
// 1. download caddy release binary: url(https://artifactory.yizhisec.com:443/artifactory/filestore/hsv2/bin/caddy)
// 2. generate caddyfile
// 3. generate systemd service file
func (m *maker) Proxy(ctx context.Context) error { func (m *maker) Proxy(ctx context.Context) error {
const ( const (
binURL = "https://artifactory.yizhisec.com:443/artifactory/filestore/hsv2/bin/caddy" binURL = "https://artifactory.yizhisec.com:443/artifactory/filestore/hsv2/bin/caddy"
caddyfileTpl = `:8443 { caddyfileTpl = `{
reverse_proxy __UPSTREAM_8443__ layer4 {
} :8443 {
route {
proxy __UPSTREAMS_8443__
}
}
:443 { :443 {
reverse_proxy __UPSTREAM_443__ route {
proxy __UPSTREAMS_443__
}
}
}
}` }`
systemdSvc = `[Unit] systemdSvc = `[Unit]
Description=YiZhiSec Caddy Reverse Proxy Description=YiZhiSec Caddy Reverse Proxy
@@ -38,7 +40,7 @@ StandardOutput=journal
StandardError=journal StandardError=journal
Nice=-20 Nice=-20
Restart=always Restart=always
RestartSec=15 RestartSec=5
[Install] [Install]
WantedBy=multi-user.target` WantedBy=multi-user.target`

View File

@@ -5,7 +5,6 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"gitea.loveuer.com/yizhisec/pkg3/logger" "gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt" "yizhisec.com/hsv2/forge/internal/opt"
@@ -38,7 +37,7 @@ func WithRedisPassword(password string) RedisOpt {
func WithRedisStorage(storage string) RedisOpt { func WithRedisStorage(storage string) RedisOpt {
return func(o *redisOpt) { return func(o *redisOpt) {
if matched, _ := regexp.MatchString(`^\d+(\.\d+)?[EPTGMK]i?$`, storage); matched { if opt.StorageSizeReg.MatchString(storage) {
o.Storage = storage o.Storage = storage
} }
} }

View File

@@ -0,0 +1,212 @@
package maker
import (
"context"
"fmt"
"os"
"path/filepath"
"gitea.loveuer.com/yizhisec/pkg3/logger"
"yizhisec.com/hsv2/forge/internal/opt"
"yizhisec.com/hsv2/forge/pkg/model"
"yizhisec.com/hsv2/forge/pkg/resource"
)
// SeafileOpt mutates the seafileOpt settings consumed by maker.Seafile.
type SeafileOpt func(*seafileOpt)

// seafileOpt collects the configurable values substituted into the
// Seafile manifests and the hs_backup_seafile config.
type seafileOpt struct {
	DBHost         string // MySQL host injected into seafile.yaml and config.yml
	DBPassword     string // MySQL root password
	AdminEmail     string // Seafile admin login e-mail
	AdminPassword  string // Seafile admin password
	ServerHostname string // public hostname handed to the Seafile container
	Storage        string // PVC size, a Kubernetes quantity string (e.g. "50Gi")
}
// WithSeafileStorage sets the Seafile PVC size. The value must be a
// valid Kubernetes quantity string (e.g. "50Gi", "100Mi"); anything
// else is silently ignored and the default is kept.
func WithSeafileStorage(storage string) SeafileOpt {
	return func(o *seafileOpt) {
		if !opt.StorageSizeReg.MatchString(storage) {
			return
		}
		o.Storage = storage
	}
}
// WithSeafileDBHost overrides the default MySQL host. Empty values are
// ignored so the built-in default is kept.
//
// Fix: the original guarded on `o.DBHost == ""`, but maker.Seafile
// pre-fills the default before applying options, so the option could
// never take effect; the guard now checks the argument instead.
func WithSeafileDBHost(host string) SeafileOpt {
	return func(o *seafileOpt) {
		if host != "" {
			o.DBHost = host
		}
	}
}
// WithSeafileDBPassword overrides the default MySQL root password.
// Empty values are ignored so the built-in default is kept.
//
// Fix: the original guarded on `o.DBPassword == ""`, but maker.Seafile
// pre-fills the default before applying options, so the option could
// never take effect; the guard now checks the argument instead.
func WithSeafileDBPassword(password string) SeafileOpt {
	return func(o *seafileOpt) {
		if password != "" {
			o.DBPassword = password
		}
	}
}
// WithSeafileAdminEmail sets the Seafile admin account e-mail. Values
// that do not match the e-mail pattern are silently ignored and the
// default is kept.
func WithSeafileAdminEmail(email string) SeafileOpt {
	return func(o *seafileOpt) {
		if !opt.EmailReg.MatchString(email) {
			return
		}
		o.AdminEmail = email
	}
}
// WithSeafileAdminPassword overrides the default Seafile admin
// password. Empty values are ignored so the built-in default is kept.
//
// Fix: the original guarded on `o.AdminPassword == ""`, but
// maker.Seafile pre-fills the default before applying options, so the
// option could never take effect; the guard now checks the argument.
func WithSeafileAdminPassword(password string) SeafileOpt {
	return func(o *seafileOpt) {
		if password != "" {
			o.AdminPassword = password
		}
	}
}
// WithSeafileHostname overrides the default public server hostname.
// Empty values are ignored so the built-in default is kept.
//
// Fix: the original guarded on `o.ServerHostname == ""`, but
// maker.Seafile pre-fills the default before applying options, so the
// option could never take effect; the guard now checks the argument.
func WithSeafileHostname(hostname string) SeafileOpt {
	return func(o *seafileOpt) {
		if hostname != "" {
			o.ServerHostname = hostname
		}
	}
}
// Seafile renders everything needed to deploy the Seafile stack into
// <make-dir>/dependency/seafile (seafile.yaml, the backup-seafile
// deployment, config.yml, the nginx site config, and an upsert.sh
// apply script) and pre-pulls the required container images into
// <make-dir>/dependency/image.
//
// opts override the built-in defaults (DB host/password, admin
// account, public hostname, PVC size).
func (m *maker) Seafile(ctx context.Context, opts ...SeafileOpt) error {
	const (
		// _config is the hs_backup_seafile service configuration; the four
		// %s verbs are filled with DBHost, DBPassword, AdminEmail and
		// AdminPassword, in that order.
		_config = `
ControllerServer:
  UserManagement: user-service.hsv2:9013
Database:
  Mysql:
    Address: %s:3306
    DBName: backup_server
    Password: %s
    SeafileDBName: seafile_db
    UserName: root
Log:
  Dir: ./log
  Level: 1
  Name: hs_backup_seafile
SeafileServer:
  Admin: %s
  AdminPassword: %s
  BackupDir: /seafile/backup_data
  Host: seafile-service
  Port: 80
  StorageDir: /seafile/storage
Sentry:
  DSN: https://fd7149f063c211eda2b50242ac15001c@sentry.yizhisec.com:13443/7
  TracesSampleRate: 1
Web:
  Host: 0.0.0.0
  Mode: release
  Port: 9027
WorkDir: /yizhisec/hs_backup_seafile/workspace
YosGuard:
  Host: 172.17.0.1
  Port: 7788`
		// _upsert applies the config maps and deployment, then restarts the
		// backup deployment so the new config is picked up.
		//
		// Fix: the nginx config map is now built from ./seafile.conf, which
		// is the filename actually written below; the original referenced a
		// non-existent ./nginx.conf and the script would have failed.
		_upsert = `#!/bin/bash
kubectl create configmap config-backup-seafile --namespace seafile --from-file=config.yml=./config.yml --dry-run=client -o yaml | kubectl apply -f -
kubectl create configmap nginx-seafile --namespace hsv2 --from-file=seafile.conf=./seafile.conf --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -f deployment.yaml
kubectl rollout restart deployment backup-seafile-deployment -n seafile`
	)

	var (
		err error
		// Defaults; options applied below may override them.
		o = &seafileOpt{
			DBHost:         "mysql-cluster-mysql-master.db-mysql",
			DBPassword:     "L0hMysql.",
			AdminEmail:     "admin@yizhisec.com",
			AdminPassword:  "asecret",
			ServerHostname: "cloud.hybridscope.com",
			Storage:        "50Gi",
		}
		workdir = filepath.Join(opt.Cfg.Make.Dir, "dependency", "seafile")
	)

	for _, fn := range opts {
		fn(o)
	}

	logger.Info("☑️ maker.Seafile: 开始构建 seafile 依赖, dir = %s", workdir)

	// 1. 准备工作目录
	logger.Debug("☑️ make.Seafile: 准备工作目录: %s", workdir)
	if err = os.MkdirAll(workdir, 0755); err != nil {
		logger.Error("❌ make.Seafile: 准备工作目录: %s 失败, err = %v", workdir, err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备工作目录: %s 成功", workdir)

	// 2. seafile yaml — the manifest template takes DBHost, DBPassword,
	// AdminEmail, AdminPassword, ServerHostname and Storage in order.
	logger.Debug("☑️ make.Seafile: 准备 seafile yaml")
	bs := []byte(fmt.Sprintf(resource.YAMLSeafile, o.DBHost, o.DBPassword, o.AdminEmail, o.AdminPassword, o.ServerHostname, o.Storage))
	if err = os.WriteFile(filepath.Join(workdir, "seafile.yaml"), bs, 0644); err != nil {
		logger.Error("❌ make.Seafile: 准备 seafile yaml: %s 失败, err = %v", filepath.Join(workdir, "seafile.yaml"), err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备 seafile yaml 成功")

	// 3. backup-seafile deployment
	logger.Debug("☑️ make.Seafile: 准备 backup-seafile deployment")
	bs = []byte(resource.YAMLBackupSeafile)
	if err = os.WriteFile(filepath.Join(workdir, "deployment.yaml"), bs, 0644); err != nil {
		logger.Error("❌ make.Seafile: 准备 backup-seafile deployment: %s 失败, err = %v", filepath.Join(workdir, "deployment.yaml"), err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备 backup-seafile deployment 成功")

	// 4. config.yml
	logger.Debug("☑️ make.Seafile: 准备 config.yml")
	bs = []byte(fmt.Sprintf(_config, o.DBHost, o.DBPassword, o.AdminEmail, o.AdminPassword))
	if err = os.WriteFile(filepath.Join(workdir, "config.yml"), bs, 0644); err != nil {
		logger.Error("❌ make.Seafile: 准备 config.yml: %s 失败, err = %v", filepath.Join(workdir, "config.yml"), err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备 config.yml 成功")

	// 5. seafile.conf (nginx site config, embedded verbatim)
	logger.Debug("☑️ make.Seafile: 准备 seafile.conf")
	bs = resource.NGINXSeafile
	if err = os.WriteFile(filepath.Join(workdir, "seafile.conf"), bs, 0644); err != nil {
		logger.Error("❌ make.Seafile: 准备 seafile.conf: %s 失败, err = %v", filepath.Join(workdir, "seafile.conf"), err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备 seafile.conf 成功")

	// 6. upsert.sh — 0755 so it is directly executable.
	logger.Debug("☑️ make.Seafile: 准备 upsert.sh")
	bs = []byte(_upsert)
	if err = os.WriteFile(filepath.Join(workdir, "upsert.sh"), bs, 0755); err != nil {
		logger.Error("❌ make.Seafile: 准备 upsert.sh: %s 失败, err = %v", filepath.Join(workdir, "upsert.sh"), err)
		return err
	}
	logger.Debug("✅ make.Seafile: 准备 upsert.sh 成功")

	// 7. prepare images — pulled (or re-pulled when Force is set) and
	// saved as tar archives into the shared dependency/image directory.
	logger.Debug("☑️ make.Seafile: 准备 images")
	imgDir := filepath.Join(opt.Cfg.Make.Dir, "dependency", "image")
	if err = os.MkdirAll(imgDir, 0755); err != nil {
		logger.Error("❌ make.Seafile: 准备 images 目录: %s 失败, err = %v", imgDir, err)
		return err
	}
	var images = []*model.Image{
		{Name: "hub.yizhisec.com/hybridscope/hs_backup_seafile:latest", Fallback: "", Save: "seafile.backup_seafile.tar", Force: true},
		{Name: "hub.yizhisec.com/product/hybridscope/memcached:latest", Fallback: "", Save: "seafile.memcached.tar"},
		{Name: "hub.yizhisec.com/product/hybridscope/seafile-mc:latest", Fallback: "", Save: "seafile.seafile_mc.tar"},
	}
	for _, img := range images {
		img.Save = filepath.Join(imgDir, img.Save)
		if err = m.Image(ctx, img.Name, WithImageSave(img.Save), WithImageForcePull(img.Force)); err != nil {
			logger.Error("❌ make.Seafile: 准备 image: %s 失败, err = %v", img.Name, err)
			return err
		}
	}
	logger.Debug("✅ make.Seafile: 准备 images 成功")

	logger.Info("✅ maker.Seafile: 构建 seafile 依赖成功!!!")
	return nil
}

View File

@@ -1,5 +1,7 @@
package opt package opt
import "regexp"
type config struct { type config struct {
Debug bool Debug bool
Make struct { Make struct {
@@ -11,3 +13,8 @@ type config struct {
var ( var (
Cfg = &config{} Cfg = &config{}
) )
var (
	// StorageSizeReg validates Kubernetes storage quantity strings,
	// e.g. "50Gi", "100Mi", "1.5Ti".
	StorageSizeReg = regexp.MustCompile(`^\d+(\.\d+)?[EPTGMK]i?$`)
	// EmailReg validates a basic user@domain.tld e-mail address.
	EmailReg = regexp.MustCompile(`^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$`)
)

8
pkg/model/image.go Normal file
View File

@@ -0,0 +1,8 @@
package model
// Image describes a container image to fetch and archive; its fields
// are fed to maker.Image via WithImageFallback, WithImageSave and
// WithImageForcePull.
type Image struct {
	Name     string // primary image reference to pull
	Fallback string // alternative reference passed to WithImageFallback (may be empty)
	Save     string // tar archive filename (or path) the image is saved as
	Force    bool   // when true, WithImageForcePull re-pulls even if present locally
}

View File

@@ -0,0 +1,71 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: client-deployment
namespace: hsv2
spec:
replicas: %d
selector:
matchLabels:
app: client
template:
metadata:
labels:
app: client
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: client
containers:
- name: client
image: hub.yizhisec.com/hybridscope/client_server:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: ssl-pub-crt
mountPath: /etc/yizhisec/license/pub_key
subPath: pub_key
readOnly: true
- name: config-volume
mountPath: /etc/client_server/config.yml
subPath: config.yml
readOnly: true
volumes:
- name: ssl-pub-crt
configMap:
name: ssl-pub-crt
items:
- key: pub_key
path: pub_key
- name: config-volume
configMap:
name: config-client
items:
- key: config.yml
path: config.yml
---
apiVersion: v1
kind: Service
metadata:
name: client-service
namespace: hsv2
spec:
selector:
app: client
ports:
- protocol: TCP
name: web
port: 9129
targetPort: 9129
- protocol: TCP
name: web2
port: 9024
targetPort: 9024
- protocol: TCP
name: web-message
port: 9025
targetPort: 9025
type: ClusterIP

View File

@@ -0,0 +1,83 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gateway-deployment
namespace: hsv2
spec:
replicas: %d
selector:
matchLabels:
app: gateway
template:
metadata:
labels:
app: gateway
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: gateway
containers:
- name: gateway
image: hub.yizhisec.com/hybridscope/gateway_controller:latest
imagePullPolicy: IfNotPresent
volumeMounts:
- name: config-volume
mountPath: /etc/gateway_controller/config.yml
subPath: config.yml
readOnly: true
- name: config-token
mountPath: /etc/yizhisec/token
subPath: token
readOnly: true
- name: ssl-client-crt
mountPath: /yizhisec/ssl/client.crt
subPath: client.crt
readOnly: true
- name: ssl-client-key
mountPath: /yizhisec/ssl/client.key
subPath: client.key
readOnly: true
volumes:
- name: config-volume
configMap:
name: config-gateway
items:
- key: config.yml
path: config.yml
- name: config-token
configMap:
name: config-token
items:
- key: token
path: token
- name: ssl-client-crt
configMap:
name: ssl-client-crt
items:
- key: client.crt
path: client.crt
- name: ssl-client-key
configMap:
name: ssl-client-key
items:
- key: client.key
path: client.key
---
apiVersion: v1
kind: Service
metadata:
name: gateway-service
namespace: hsv2
spec:
selector:
app: gateway
ports:
- protocol: TCP
name: gateway-api
port: 9012
targetPort: 9012
type: ClusterIP

View File

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-deployment
namespace: hsv2
spec:
replicas: %d
selector:
matchLabels:
app: api
template:
metadata:
labels:
app: api
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: api
containers:
- name: api
image: hub.yizhisec.com/hybridscope/mie-server:latest
imagePullPolicy: IfNotPresent
args: ["server.app", "--master", "--debug"]
volumeMounts:
- name: ssl-pub-crt
mountPath: /etc/mie-server/license/pub_key
subPath: pub_key
readOnly: true
- name: config-volume
mountPath: /yizhisec/web_server/conf/conf.yml
subPath: conf.yml
readOnly: true
- name: config-license-init
mountPath: /etc/mie-server/server_license_init.conf
subPath: server_license_init.conf
readOnly: true
- name: ssl-web-crt
mountPath: /yizhisec/hs_nginx/ssl/web.server.crt
subPath: web.server.crt
readOnly: true
- name: config-oem-data
mountPath: /yizhisec/hs_nginx/data/443/oem/data.json
subPath: data.json
readOnly: true
volumes:
- name: ssl-pub-crt
configMap:
name: ssl-pub-crt
items:
- key: pub_key
path: pub_key
- name: config-volume
configMap:
name: config-api
items:
- key: conf.yml
path: conf.yml
- name: config-license-init
configMap:
name: config-license-init
items:
- key: server_license_init.conf
path: server_license_init.conf
- name: ssl-web-crt
configMap:
name: ssl-web-crt
items:
- key: web.server.crt
path: web.server.crt
- name: config-oem-data
configMap:
name: config-oem-data
items:
- key: data.json
path: data.json
---
apiVersion: v1
kind: Service
metadata:
name: api-service
namespace: hsv2
spec:
selector:
app: api
ports:
- protocol: TCP
name: mie-api
port: 9002
targetPort: 9002
type: ClusterIP

View File

@@ -0,0 +1,32 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-cron-deployment
namespace: hsv2
spec:
replicas: 1
selector:
matchLabels:
app: api-cron
template:
metadata:
labels:
app: api-cron
spec:
containers:
- name: api-cron
image: hub.yizhisec.com/hybridscope/mie-server:latest
imagePullPolicy: IfNotPresent
args: ["server.schedule"]
volumeMounts:
- name: config-volume
mountPath: /yizhisec/web_server/conf/conf.yml
subPath: conf.yml
readOnly: true
volumes:
- name: config-volume
configMap:
name: config-api
items:
- key: conf.yml
path: conf.yml

View File

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-sweeper-deployment
namespace: hsv2
spec:
replicas: 1
selector:
matchLabels:
app: api-sweeper
template:
metadata:
labels:
app: api-sweeper
spec:
containers:
- name: api-sweeper
image: hub.yizhisec.com/hybridscope/mie-server:latest
imagePullPolicy: IfNotPresent
args: ["server.delay_sweeper"]
volumeMounts:
- name: config-volume
mountPath: /yizhisec/web_server/conf/conf.yml
subPath: conf.yml
readOnly: true
- name: log-data
mountPath: /yizhisec/web_server/logs
volumes:
- name: log-data
emptyDir: {}
- name: config-volume
configMap:
name: config-api
items:
- key: conf.yml
path: conf.yml

View File

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: api-worker-deployment
namespace: hsv2
spec:
replicas: 1
selector:
matchLabels:
app: api-worker
template:
metadata:
labels:
app: api-worker
spec:
containers:
- name: api-worker
image: hub.yizhisec.com/hybridscope/mie-server:latest
imagePullPolicy: IfNotPresent
args: ["server.delay_worker"]
volumeMounts:
- name: config-volume
mountPath: /yizhisec/web_server/conf/conf.yml
subPath: conf.yml
readOnly: true
- name: log-data
mountPath: /yizhisec/web_server/logs
volumes:
- name: log-data
emptyDir: {}
- name: config-volume
configMap:
name: config-api
items:
- key: conf.yml
path: conf.yml

42
pkg/resource/app.oem.yaml Normal file
View File

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: oem-deployment
namespace: hsv2
spec:
replicas: %d
selector:
matchLabels:
app: oem
template:
metadata:
labels:
app: oem
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: oem
containers:
- name: oem
image: %s
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: oem-service
namespace: hsv2
spec:
selector:
app: oem
ports:
- protocol: TCP
port: 80
targetPort: 80
type: ClusterIP

View File

@@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: user-deployment
namespace: hsv2
spec:
replicas: %d
selector:
matchLabels:
app: user
template:
metadata:
labels:
app: user
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: user
containers:
- name: user
image: hub.yizhisec.com/hybridscope/user_management:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9013
volumeMounts:
- name: config-volume
mountPath: /etc/hs_user_management/config.yml
subPath: config.yml
readOnly: true
- name: ssl-client-crt
mountPath: /etc/hs_user_management/proxy/certs/client.crt
subPath: client.crt
readOnly: true
volumes:
- name: config-volume
configMap:
name: config-user
items:
- key: config.yml
path: config.yml
- name: ssl-client-crt
configMap:
name: ssl-client-crt
items:
- key: client.crt
path: client.crt
---
apiVersion: v1
kind: Service
metadata:
name: user-service
namespace: hsv2
spec:
selector:
app: user
ports:
- protocol: TCP
port: 9013
targetPort: 9013
type: ClusterIP

View File

@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: backup-seafile-deployment
namespace: seafile
spec:
replicas: 1
selector:
matchLabels:
app: backup-seafile
template:
metadata:
labels:
app: backup-seafile
spec:
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
labelSelector:
matchLabels:
app: backup-seafile
containers:
- name: backup-seafile
image: hub.yizhisec.com/hybridscope/hs_backup_seafile:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9027
volumeMounts:
- name: config-volume
mountPath: /etc/hs_backup_seafile/config.yml
subPath: config.yml
readOnly: true
volumes:
- name: config-volume
configMap:
name: config-backup-seafile
items:
- key: config.yml
path: config.yml
---
apiVersion: v1
kind: Service
metadata:
name: backup-seafile-service
namespace: seafile
spec:
selector:
app: backup-seafile
ports:
- protocol: TCP
port: 9027
targetPort: 9027
type: ClusterIP

View File

@@ -25,4 +25,37 @@ var (
//go:embed less-dns.yaml //go:embed less-dns.yaml
YAMLLessDNS []byte YAMLLessDNS []byte
//go:embed seafile.yaml
YAMLSeafile string
//go:embed backup-seafile.yaml
YAMLBackupSeafile string
//go:embed seafile.conf
NGINXSeafile []byte
//go:embed app.user.yaml
YAMLAppUser string
//go:embed app.gateway.yaml
YAMLAppGateway string
//go:embed app.client.yaml
YAMLAppClient string
//go:embed app.mie.api.yaml
YAMLAppMieAPI string
//go:embed app.mie.worker.yaml
YAMLAppMieWorker []byte
//go:embed app.mie.cron.yaml
YAMLAppMieCron []byte
//go:embed app.mie.sweeper.yaml
YAMLAppMieSweeper []byte
//go:embed app.oem.yaml
YAMLAppOEM string
) )

81
pkg/resource/seafile.conf Normal file
View File

@@ -0,0 +1,81 @@
server {
listen 443 ssl proxy_protocol;
server_name seafile.yizhisec.com cloud.hybridscope.com seafile-yizhise-com cloud-hybridscope-com;
ssl_certificate /etc/nginx/ssl/client.server.crt;
ssl_certificate_key /etc/nginx/ssl/client.server.key;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers off;
add_header Strict-Transport-Security "max-age=63072000" always;
ssl_trusted_certificate /etc/nginx/ssl/client.ca.crt;
ssl_client_certificate /etc/nginx/ssl/client.ca.crt;
ssl_verify_client on;
client_max_body_size 500M;
# location = /auth-sharing {
# internal;
# proxy_pass http://client-service:9129/api/auth-sharing;
# proxy_http_version 1.1;
# proxy_pass_request_body off;
# proxy_set_header Content-Length "";
# proxy_set_header X-Original-URI $request_uri;
# }
location /f/ {
rewrite ^(.+[^/])$ $1/ last; # 补上末尾的 /,避免重定向两次
# auth_request /auth-sharing;
# proxy_pass http://hs-openresty:13381;
proxy_pass http://seafile-service.seafile;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 300s;
}
location /api/v1/ {
proxy_pass http://backup-seafile-service.seafile:9027;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 300s;
}
location /api2/ {
# proxy_pass http://hs-resource-server:19980;
proxy_pass http://seafile-service.seafile;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 300s;
}
location /api/v2.1/ {
# proxy_pass http://hs-resource-server:19980;
proxy_pass http://seafile-service.seafile;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_read_timeout 300s;
}
location /seafhttp/ {
# proxy_pass http://hs-resource-server:19980;
proxy_pass http://seafile-service.seafile;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 300s;
}
}

View File

@@ -20,7 +20,8 @@ spec:
spec: spec:
containers: containers:
- name: memcached - name: memcached
image: hub.yizhisec.com/product/hybridscope/memcached image: hub.yizhisec.com/product/hybridscope/memcached:latest
imagePullPolicy: IfNotPresent
args: ["-m", "256"] args: ["-m", "256"]
ports: ports:
- containerPort: 11211 - containerPort: 11211
@@ -60,19 +61,19 @@ spec:
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
env: env:
- name: DB_HOST - name: DB_HOST
value: "mysql-cluster-mysql-master.db-mysql" value: "%s"
- name: DB_ROOT_PASSWD - name: DB_ROOT_PASSWD
value: "L0hMysql." #db's password value: "%s" #db's password
- name: TIME_ZONE - name: TIME_ZONE
value: "Asia/Shanghai" value: "Asia/Shanghai"
- name: SEAFILE_ADMIN_EMAIL - name: SEAFILE_ADMIN_EMAIL
value: "admin@yizhisec.com" #admin email value: "%s" #admin email
- name: SEAFILE_ADMIN_PASSWORD - name: SEAFILE_ADMIN_PASSWORD
value: "asecret" #admin password value: "%s" #admin password
- name: SEAFILE_SERVER_LETSENCRYPT - name: SEAFILE_SERVER_LETSENCRYPT
value: "false" value: "false"
- name: SEAFILE_SERVER_HOSTNAME - name: SEAFILE_SERVER_HOSTNAME
value: "cloud.hybridscope.com" #hostname value: "%s" #hostname
ports: ports:
- containerPort: 80 - containerPort: 80
volumeMounts: volumeMounts:
@@ -95,4 +96,18 @@ spec:
storageClassName: longhorn storageClassName: longhorn
resources: resources:
requests: requests:
storage: 10Gi storage: %s
---
apiVersion: v1
kind: Service
metadata:
name: seafile-service
namespace: seafile
spec:
selector:
app: seafile
type: ClusterIP
ports:
- protocol: TCP
port: 80
targetPort: 80