Fix SSE connection handling and optimize Dockerfile
- Fixed SSE connection not being properly closed when pod logs dialog is closed - Added proper cleanup for EventSource connections in K8sResourceList.tsx - Added debugging logs to track SSE connection lifecycle - Optimized Dockerfile to avoid copying frontend files during Go build stage - Fixed backend handler to properly use context from request for log streaming 🤖 Generated with [Qoder](https://qoder.com)
This commit is contained in:
63
Dockerfile
Normal file
63
Dockerfile
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
# Multi-stage build for Cluster application with Go backend and React frontend
|
||||||
|
|
||||||
|
# Frontend build stage
|
||||||
|
FROM node:18 AS frontend-build
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy package files
|
||||||
|
COPY frontend/package.json frontend/pnpm-lock.yaml ./
|
||||||
|
|
||||||
|
# Install pnpm globally
|
||||||
|
RUN npm install -g pnpm
|
||||||
|
|
||||||
|
# Install frontend dependencies
|
||||||
|
RUN pnpm install --frozen-lockfile
|
||||||
|
|
||||||
|
# Copy frontend source
|
||||||
|
COPY frontend/ .
|
||||||
|
|
||||||
|
# Build frontend
|
||||||
|
RUN pnpm run build
|
||||||
|
|
||||||
|
# Backend build stage
|
||||||
|
FROM golang:1.22 AS backend-build
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy go mod files
|
||||||
|
COPY go.mod go.sum ./
|
||||||
|
|
||||||
|
# Download dependencies
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
# Copy only backend source code
|
||||||
|
COPY main.go ./
|
||||||
|
COPY internal/ ./internal/
|
||||||
|
COPY pkg/ ./pkg/
|
||||||
|
|
||||||
|
# Build backend
|
||||||
|
RUN go build -o cluster .
|
||||||
|
|
||||||
|
# Final stage - Nginx server
|
||||||
|
FROM nginx:latest
|
||||||
|
|
||||||
|
# Copy nginx configuration
|
||||||
|
COPY nginx.conf /etc/nginx/nginx.conf
|
||||||
|
|
||||||
|
# Copy backend binary
|
||||||
|
COPY --from=backend-build /app/cluster /app/cluster
|
||||||
|
|
||||||
|
# Copy frontend build
|
||||||
|
COPY --from=frontend-build /app/dist /usr/share/nginx/html
|
||||||
|
|
||||||
|
# Create data directory
|
||||||
|
RUN mkdir -p /app/x-storage
|
||||||
|
|
||||||
|
# Expose ports
|
||||||
|
EXPOSE 80
|
||||||
|
|
||||||
|
# Start backend and nginx
|
||||||
|
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||||
|
RUN chmod +x /docker-entrypoint.sh
|
||||||
|
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||||
10
docker-entrypoint.sh
Executable file
10
docker-entrypoint.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh
# Container entrypoint: runs the Go backend as a background process and
# nginx (static files + reverse proxy) in the foreground.

# Start the Go backend in the background
/app/cluster -address 127.0.0.1:9119 -data-dir /data &

# Wait a moment so the backend can bind its port before nginx begins
# proxying requests to it.
sleep 2

# exec so nginx replaces the shell as PID 1 and receives container stop
# signals (SIGTERM) directly, allowing a clean shutdown instead of the
# shell swallowing the signal.
exec nginx -g 'daemon off;'
@@ -76,6 +76,7 @@ export default function K8sResourceList() {
|
|||||||
const [logsDialogOpen, setLogsDialogOpen] = useState(false)
|
const [logsDialogOpen, setLogsDialogOpen] = useState(false)
|
||||||
const [logs, setLogs] = useState<string[]>([])
|
const [logs, setLogs] = useState<string[]>([])
|
||||||
const [selectedPod, setSelectedPod] = useState<{ name: string; namespace: string } | null>(null)
|
const [selectedPod, setSelectedPod] = useState<{ name: string; namespace: string } | null>(null)
|
||||||
|
const eventSourceRef = useRef<EventSource | null>(null)
|
||||||
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false)
|
||||||
const [deleteTarget, setDeleteTarget] = useState<{ name: string; namespace: string } | null>(null)
|
const [deleteTarget, setDeleteTarget] = useState<{ name: string; namespace: string } | null>(null)
|
||||||
const [deleting, setDeleting] = useState(false)
|
const [deleting, setDeleting] = useState(false)
|
||||||
@@ -98,6 +99,17 @@ export default function K8sResourceList() {
|
|||||||
}
|
}
|
||||||
}, [selectedKind, namespace, nameFilter])
|
}, [selectedKind, namespace, nameFilter])
|
||||||
|
|
||||||
|
// Clean up SSE connection on component unmount
|
||||||
|
useEffect(() => {
|
||||||
|
return () => {
|
||||||
|
if (eventSourceRef.current) {
|
||||||
|
console.log('Cleaning up SSE connection on component unmount')
|
||||||
|
eventSourceRef.current.close()
|
||||||
|
eventSourceRef.current = null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, [])
|
||||||
|
|
||||||
const fetchKubeconfig = async () => {
|
const fetchKubeconfig = async () => {
|
||||||
try {
|
try {
|
||||||
const res = await fetch('/api/v1/k8s/config')
|
const res = await fetch('/api/v1/k8s/config')
|
||||||
@@ -171,24 +183,62 @@ export default function K8sResourceList() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const handleViewLogs = (podName: string, podNamespace: string) => {
|
const handleViewLogs = (podName: string, podNamespace: string) => {
|
||||||
|
console.log('handleViewLogs called with:', { podName, podNamespace })
|
||||||
setSelectedPod({ name: podName, namespace: podNamespace })
|
setSelectedPod({ name: podName, namespace: podNamespace })
|
||||||
setLogs([])
|
setLogs([])
|
||||||
setLogsDialogOpen(true)
|
setLogsDialogOpen(true)
|
||||||
|
|
||||||
|
// Close any existing connection
|
||||||
|
if (eventSourceRef.current) {
|
||||||
|
console.log('Closing existing EventSource connection')
|
||||||
|
eventSourceRef.current.close()
|
||||||
|
eventSourceRef.current = null
|
||||||
|
}
|
||||||
|
|
||||||
const eventSource = new EventSource(
|
const eventSource = new EventSource(
|
||||||
`/api/v1/k8s/pod/logs?name=${encodeURIComponent(podName)}&namespace=${encodeURIComponent(podNamespace)}&tail=1000&follow=true`
|
`/api/v1/k8s/pod/logs?name=${encodeURIComponent(podName)}&namespace=${encodeURIComponent(podNamespace)}&tail=1000&follow=true`
|
||||||
)
|
)
|
||||||
|
|
||||||
eventSource.onmessage = (event) => {
|
// Save reference to the EventSource
|
||||||
setLogs((prev) => [...prev, event.data])
|
eventSourceRef.current = eventSource
|
||||||
setTimeout(() => logsEndRef.current?.scrollIntoView({ behavior: 'smooth' }), 100)
|
|
||||||
}
|
// Listen for the specific event type 'pod-logs'
|
||||||
|
eventSource.addEventListener('pod-logs', (event: MessageEvent) => {
|
||||||
|
try {
|
||||||
|
const message = JSON.parse(event.data)
|
||||||
|
if (message.type === 'log') {
|
||||||
|
setLogs((prev) => [...prev, message.data])
|
||||||
|
setTimeout(() => logsEndRef.current?.scrollIntoView({ behavior: 'smooth' }), 100)
|
||||||
|
} else if (message.type === 'EOF') {
|
||||||
|
// Handle end of stream if needed
|
||||||
|
} else if (message.type === 'error') {
|
||||||
|
setLogs((prev) => [...prev, `Error: ${message.data}`])
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
// If parsing fails, treat as plain text (fallback)
|
||||||
|
setLogs((prev) => [...prev, event.data])
|
||||||
|
setTimeout(() => logsEndRef.current?.scrollIntoView({ behavior: 'smooth' }), 100)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
eventSource.onerror = () => {
|
eventSource.onerror = () => {
|
||||||
eventSource.close()
|
console.log('EventSource error occurred')
|
||||||
|
if (eventSourceRef.current) {
|
||||||
|
eventSourceRef.current.close()
|
||||||
|
eventSourceRef.current = null
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return () => eventSource.close()
|
const handleCloseLogsDialog = () => {
|
||||||
|
console.log('handleCloseLogsDialog called')
|
||||||
|
// Close the EventSource connection if it exists
|
||||||
|
if (eventSourceRef.current) {
|
||||||
|
console.log('Closing EventSource connection')
|
||||||
|
eventSourceRef.current.close()
|
||||||
|
eventSourceRef.current = null
|
||||||
|
}
|
||||||
|
setLogsDialogOpen(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
const handleDeleteResource = async () => {
|
const handleDeleteResource = async () => {
|
||||||
@@ -843,7 +893,7 @@ export default function K8sResourceList() {
|
|||||||
|
|
||||||
<Dialog
|
<Dialog
|
||||||
open={logsDialogOpen}
|
open={logsDialogOpen}
|
||||||
onClose={() => setLogsDialogOpen(false)}
|
onClose={handleCloseLogsDialog}
|
||||||
maxWidth="lg"
|
maxWidth="lg"
|
||||||
fullWidth
|
fullWidth
|
||||||
>
|
>
|
||||||
@@ -852,7 +902,7 @@ export default function K8sResourceList() {
|
|||||||
<Typography variant="h6">
|
<Typography variant="h6">
|
||||||
Pod 日志: {selectedPod?.name} ({selectedPod?.namespace})
|
Pod 日志: {selectedPod?.name} ({selectedPod?.namespace})
|
||||||
</Typography>
|
</Typography>
|
||||||
<IconButton onClick={() => setLogsDialogOpen(false)}>
|
<IconButton onClick={handleCloseLogsDialog}>
|
||||||
<CloseIcon />
|
<CloseIcon />
|
||||||
</IconButton>
|
</IconButton>
|
||||||
</Box>
|
</Box>
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
"k8s.io/client-go/dynamic"
|
"k8s.io/client-go/dynamic"
|
||||||
@@ -22,7 +23,6 @@ import (
|
|||||||
"k8s.io/client-go/rest"
|
"k8s.io/client-go/rest"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
"sigs.k8s.io/yaml"
|
"sigs.k8s.io/yaml"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func getK8sClient(db *gorm.DB) (*kubernetes.Clientset, error) {
|
func getK8sClient(db *gorm.DB) (*kubernetes.Clientset, error) {
|
||||||
@@ -386,25 +386,25 @@ func K8sResourceGet(ctx context.Context, db *gorm.DB, store store.Store) fiber.H
|
|||||||
|
|
||||||
func getResourceName(kind string) string {
|
func getResourceName(kind string) string {
|
||||||
kindToResource := map[string]string{
|
kindToResource := map[string]string{
|
||||||
"Namespace": "namespaces",
|
"Namespace": "namespaces",
|
||||||
"Deployment": "deployments",
|
"Deployment": "deployments",
|
||||||
"StatefulSet": "statefulsets",
|
"StatefulSet": "statefulsets",
|
||||||
"Service": "services",
|
"Service": "services",
|
||||||
"ConfigMap": "configmaps",
|
"ConfigMap": "configmaps",
|
||||||
"Pod": "pods",
|
"Pod": "pods",
|
||||||
"PersistentVolume": "persistentvolumes",
|
"PersistentVolume": "persistentvolumes",
|
||||||
"PersistentVolumeClaim": "persistentvolumeclaims",
|
"PersistentVolumeClaim": "persistentvolumeclaims",
|
||||||
"Secret": "secrets",
|
"Secret": "secrets",
|
||||||
"Ingress": "ingresses",
|
"Ingress": "ingresses",
|
||||||
"DaemonSet": "daemonsets",
|
"DaemonSet": "daemonsets",
|
||||||
"Job": "jobs",
|
"Job": "jobs",
|
||||||
"CronJob": "cronjobs",
|
"CronJob": "cronjobs",
|
||||||
"ReplicaSet": "replicasets",
|
"ReplicaSet": "replicasets",
|
||||||
"ServiceAccount": "serviceaccounts",
|
"ServiceAccount": "serviceaccounts",
|
||||||
"Role": "roles",
|
"Role": "roles",
|
||||||
"RoleBinding": "rolebindings",
|
"RoleBinding": "rolebindings",
|
||||||
"ClusterRole": "clusterroles",
|
"ClusterRole": "clusterroles",
|
||||||
"ClusterRoleBinding": "clusterrolebindings",
|
"ClusterRoleBinding": "clusterrolebindings",
|
||||||
}
|
}
|
||||||
|
|
||||||
if resource, ok := kindToResource[kind]; ok {
|
if resource, ok := kindToResource[kind]; ok {
|
||||||
@@ -726,21 +726,22 @@ func K8sPodLogs(ctx context.Context, db *gorm.DB, store store.Store) fiber.Handl
|
|||||||
|
|
||||||
req := clientset.CoreV1().Pods(namespace).GetLogs(podName, podLogOpts)
|
req := clientset.CoreV1().Pods(namespace).GetLogs(podName, podLogOpts)
|
||||||
|
|
||||||
logCtx, cancel := context.WithCancel(context.Background())
|
logCtx, cancel := context.WithCancel(c.Context())
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
stream, err := req.Stream(logCtx)
|
stream, err := req.Stream(logCtx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
cancel()
|
||||||
return resp.R500(c, "", nil, fmt.Errorf("failed to get pod logs: %w", err))
|
return resp.R500(c, "", nil, fmt.Errorf("failed to get pod logs: %w", err))
|
||||||
}
|
}
|
||||||
defer stream.Close()
|
|
||||||
|
|
||||||
// Use the existing SSE manager from resp package
|
// Use the existing SSE manager from resp package
|
||||||
manager := resp.SSE(c, "pod-logs")
|
manager := resp.SSE(c, "pod-logs")
|
||||||
|
|
||||||
// Start streaming logs in a goroutine
|
// Start streaming logs in a goroutine
|
||||||
go func() {
|
go func() {
|
||||||
|
defer stream.Close()
|
||||||
defer manager.Close()
|
defer manager.Close()
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
reader := bufio.NewReader(stream)
|
reader := bufio.NewReader(stream)
|
||||||
for {
|
for {
|
||||||
@@ -751,20 +752,18 @@ func K8sPodLogs(ctx context.Context, db *gorm.DB, store store.Store) fiber.Handl
|
|||||||
line, err := reader.ReadString('\n')
|
line, err := reader.ReadString('\n')
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
manager.Send("[EOF]")
|
manager.JSON(map[string]any{"type": "EOF"})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
manager.Send(fmt.Sprintf("error: %v", err))
|
manager.JSON(map[string]any{"type": "error", "data": err.Error()})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
manager.Send(line)
|
manager.JSON(map[string]any{"data": line, "type": "log"})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Return nil since we're handling the response directly
|
return c.SendStreamWriter(manager.Writer())
|
||||||
c.Context().SetBodyStreamWriter(manager.Writer())
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
51
nginx.conf
Normal file
51
nginx.conf
Normal file
@@ -0,0 +1,51 @@
events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    upstream backend {
        server 127.0.0.1:9119;
    }

    server {
        listen 80;
        server_name localhost;

        # Serve static frontend files, falling back to index.html for
        # client-side (SPA) routes.
        location / {
            root /usr/share/nginx/html;
            index index.html;
            try_files $uri $uri/ /index.html;
        }

        # Proxy API requests to backend.
        # Buffering is disabled and the read timeout is raised because
        # /api/v1/k8s/pod/logs streams Server-Sent Events: with the
        # defaults, nginx buffers the event stream and closes the
        # connection after 60s of proxy_read_timeout, breaking live
        # log tailing in the UI.
        location /api/ {
            proxy_pass http://backend;
            proxy_http_version 1.1;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_buffering off;
            proxy_cache off;
            proxy_read_timeout 3600s;
        }

        # Proxy OCI registry v2 requests to backend
        location /v2/ {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }

        # Proxy registry requests to backend
        location /registry/ {
            proxy_pass http://backend;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
    }
}
Reference in New Issue
Block a user