Compare commits

9 Commits

Author SHA1 Message Date
523edffc4e Merge pull request 'v1.0.2' (#6) from dev into master
Reviewed-on: #6
2026-04-13 10:28:11 +00:00
Timi
3cc371c53e v1.0.2
All checks were successful
CI / build-deploy (pull_request) Successful in 27s
CI / notify-on-failure (pull_request) Has been skipped
2026-04-13 18:27:29 +08:00
c7ddb1a8b0 Merge pull request 'v1.0.1' (#5) from dev into master
Reviewed-on: #5
2026-04-12 16:17:02 +00:00
Timi
b46e9079d5 v1.0.1
All checks were successful
CI / build-deploy (pull_request) Successful in 26s
CI / notify-on-failure (pull_request) Has been skipped
2026-04-13 00:09:55 +08:00
Timi
dc20070bf8 update status api 2026-04-13 00:09:48 +08:00
45c9fc814a Merge pull request 'v1.0.0' (#4) from dev into master
Reviewed-on: #4
2026-04-09 05:16:04 +00:00
Timi
78163441dd v1.0.0
All checks were successful
CI / build-deploy (pull_request) Successful in 16s
CI / notify-on-failure (pull_request) Has been skipped
2026-04-09 13:14:20 +08:00
407dc13ac4 Merge pull request 'v1.0.0' (#3) from dev into master
Reviewed-on: #3
2026-04-09 04:09:16 +00:00
Timi
9762be1244 v1.0.0
Some checks failed
CI / build-deploy (pull_request) Failing after 51s
CI / notify-on-failure (pull_request) Successful in 0s
2026-04-09 12:08:24 +08:00
12 changed files with 160 additions and 106 deletions

View File

@@ -52,25 +52,22 @@ jobs:
- name: Build project
run: |
mvn -B -DskipTests clean package
mvn -B -DskipTests clean package -P prod-linux
- name: Deploy service
if: success()
env:
HOST: host.docker.internal
APP_PATH: ${{ vars.APP_PATH }}
DOCKER_CONTAINER_NAME: ${{ vars.DOCKER_CONTAINER_NAME }}
SSHPASS: ${{ secrets.TIMI_SERVER_SSH_PWD }}
CONTAINER_NAME: ${{ vars.CONTAINER_NAME }}
CONTAINER_TARGET_PATH: ${{ vars.CONTAINER_TARGET_PATH }}
MAX_RETRIES: 3
RETRY_DELAY: 10
run: |
if [ -z "$HOST" ] || [ -z "$APP_PATH" ] || [ -z "DOCKER_CONTAINER_NAME" ] || [ -z "$SSHPASS" ]; then
if [ -z "$CONTAINER_NAME" ] || [ -z "$CONTAINER_TARGET_PATH" ]; then
echo "Missing production environment variables"
echo "Required: APP_PATH, DOCKER_CONTAINER_NAME, TIMI_SERVER_SSH_PWD"
echo "Required: CONTAINER_NAME, CONTAINER_TARGET_PATH"
exit 1
fi
# 重试函数
retry_command() {
local cmd="$1"
local desc="$2"
@@ -79,10 +76,10 @@ jobs:
while [ $attempt -le $MAX_RETRIES ]; do
echo "[$desc] Attempt $attempt/$MAX_RETRIES..."
if eval "$cmd"; then
echo "✓ $desc succeeded"
echo "OK: $desc succeeded"
return 0
fi
echo "✗ $desc failed (attempt $attempt/$MAX_RETRIES)"
echo "FAIL: $desc failed (attempt $attempt/$MAX_RETRIES)"
if [ $attempt -lt $MAX_RETRIES ]; then
echo "Retrying in ${RETRY_DELAY}s..."
sleep $RETRY_DELAY
@@ -90,16 +87,10 @@ jobs:
attempt=$((attempt + 1))
done
echo "✗ $desc failed after $MAX_RETRIES attempts"
echo "FAIL: $desc failed after $MAX_RETRIES attempts"
return 1
}
# SSH 配置(使用密码认证)
SSH_PORT="22"
SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=30 -o ServerAliveInterval=10 -o ServerAliveCountMax=3 -p $SSH_PORT"
SCP_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=30 -o ServerAliveInterval=10 -o ServerAliveCountMax=3 -P $SSH_PORT"
# 获取构建产物信息
version=$(mvn -q -DforceStdout help:evaluate -Dexpression=project.version)
artifact_id=$(mvn -q -DforceStdout help:evaluate -Dexpression=project.artifactId)
jar_file="target/${artifact_id}-${version}.jar"
@@ -109,18 +100,26 @@ jobs:
exit 1
fi
# 目标文件名(去掉版本号)
target_jar="${artifact_id}.jar"
echo "Deploying $jar_file to $HOST:$APP_PATH/$target_jar"
# 上传文件(带重试)
if ! retry_command "sshpass -e scp $SCP_OPTS \"$jar_file\" \"root@$HOST:$APP_PATH/$target_jar\"" "SCP upload"; then
if ! command -v docker >/dev/null 2>&1; then
echo "docker command not found in runner environment"
exit 1
fi
# 重启 Docker 服务(带重试)
echo "Restarting Docker service: $DOCKER_SERVICE_NAME"
if ! retry_command "sshpass -e ssh $SSH_OPTS \"root@$HOST\" \"docker restart $DOCKER_SERVICE_NAME\"" "Docker restart"; then
if ! docker inspect "$CONTAINER_NAME" >/dev/null 2>&1; then
echo "Docker container not found: $CONTAINER_NAME"
exit 1
fi
target_jar="${artifact_id}.jar"
container_target="${CONTAINER_TARGET_PATH%/}/$target_jar"
echo "Deploying $jar_file to container $CONTAINER_NAME:$container_target"
if ! retry_command "docker cp \"$jar_file\" \"$CONTAINER_NAME:$container_target\"" "Docker copy"; then
exit 1
fi
echo "Restarting Docker container: $CONTAINER_NAME"
if ! retry_command "docker restart \"$CONTAINER_NAME\"" "Docker restart"; then
exit 1
fi
echo "Deployment completed successfully"
@@ -142,7 +141,6 @@ jobs:
exit 1
fi
# Use internal URL if available, fallback to public URL
if [ -n "$GITEA_INTERNAL_URL" ]; then
api_base_url="$GITEA_INTERNAL_URL"
echo "Using internal Gitea URL: $api_base_url"
@@ -151,7 +149,6 @@ jobs:
echo "Using public Gitea URL: $api_base_url"
fi
# 获取构建产物信息
version=$(mvn -q -DforceStdout help:evaluate -Dexpression=project.version)
artifact_id=$(mvn -q -DforceStdout help:evaluate -Dexpression=project.artifactId)
jar_file="target/${artifact_id}-${version}.jar"
@@ -179,17 +176,14 @@ jobs:
echo "API URL: $api_url"
echo "Target commit: $RELEASE_TARGET"
# 使用唯一临时文件避免跨 job 污染
release_response_file=$(mktemp /tmp/release_response_XXXXXX.json)
trap "rm -f $release_response_file" EXIT
# 创建 release(带重试,处理幂等性)
release_id=""
attempt=1
while [ $attempt -le $MAX_RETRIES ] && [ -z "$release_id" ]; do
echo "[Create release] Attempt $attempt/$MAX_RETRIES..."
# 清空临时文件
> "$release_response_file"
http_code=$(curl -sS -w "%{http_code}" -o "$release_response_file" -X POST "$api_url" \
@@ -203,30 +197,27 @@ jobs:
echo "HTTP Status: $http_code"
if [ "$http_code" = "201" ]; then
# 提取第一个 id 字段的值,确保去除换行符
if command -v jq >/dev/null 2>&1; then
release_id=$(echo "$response" | jq -r '.id' 2>/dev/null)
else
release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -1 | cut -d: -f2 | tr -d '\n\r')
fi
echo "✓ Release created: id=$release_id"
echo "OK: Release created: id=$release_id"
elif [ "$http_code" = "409" ]; then
# HTTP 409 Conflict: Release 已存在,获取现有的 release_id
echo "Release already exists (HTTP 409), fetching existing release..."
existing=$(curl -sS "$api_url" -H "Authorization: token $GITEA_TOKEN" --connect-timeout 30 2>/dev/null || echo "[]")
# 使用 jq 解析 JSON(如果没有 jq 则用 grep)
if command -v jq >/dev/null 2>&1; then
release_id=$(echo "$existing" | jq -r ".[] | select(.tag_name==\"$RELEASE_TAG\") | .id" 2>/dev/null | head -1)
else
release_id=$(echo "$existing" | grep -o '"id":[0-9]*' | head -1 | cut -d: -f2 | tr -d '\n\r')
fi
if [ -n "$release_id" ]; then
echo "✓ Found existing release: id=$release_id"
echo "OK: Found existing release: id=$release_id"
else
echo "✗ Could not find existing release id"
echo "FAIL: Could not find existing release id"
fi
else
echo "✗ Failed (HTTP $http_code)"
echo "FAIL: Create release failed (HTTP $http_code)"
if [ $attempt -lt $MAX_RETRIES ]; then
echo "Retrying in ${RETRY_DELAY}s..."
sleep $RETRY_DELAY
@@ -236,17 +227,15 @@ jobs:
done
if [ -z "$release_id" ]; then
echo "✗ Failed to create/find release after $MAX_RETRIES attempts"
echo "FAIL: Failed to create or find release after $MAX_RETRIES attempts"
exit 1
fi
# 上传 fat jar(带重试)
asset_name=$(basename "$jar_file")
echo "Uploading asset: $asset_name (size: $file_size bytes)"
upload_url="$api_url/$release_id/assets?name=$asset_name"
echo "Upload URL: $upload_url"
# 使用唯一临时文件避免跨 job 污染
asset_response_file=$(mktemp /tmp/asset_response_XXXXXX.json)
trap "rm -f $release_response_file $asset_response_file" EXIT
@@ -255,10 +244,8 @@ jobs:
while [ $attempt -le $MAX_RETRIES ] && [ "$upload_success" = "false" ]; do
echo "[Upload asset] Attempt $attempt/$MAX_RETRIES..."
# 清空临时文件
> "$asset_response_file"
# Gitea API 要求使用 multipart/form-data 格式上传文件
http_code=$(curl -sS -w "%{http_code}" -o "$asset_response_file" -X POST "$upload_url" \
-H "Authorization: token $GITEA_TOKEN" \
--connect-timeout 30 \
@@ -267,9 +254,9 @@ jobs:
if [ "$http_code" = "201" ]; then
upload_success=true
echo "✓ Successfully uploaded: $asset_name"
echo "OK: Successfully uploaded: $asset_name"
else
echo "✗ Upload failed (HTTP $http_code)"
echo "FAIL: Upload failed (HTTP $http_code)"
cat "$asset_response_file" 2>/dev/null || true
fi
@@ -281,7 +268,7 @@ jobs:
done
if [ "$upload_success" = "false" ]; then
echo "✗ Failed to upload asset after $MAX_RETRIES attempts"
echo "FAIL: Failed to upload asset after $MAX_RETRIES attempts"
exit 1
fi
@@ -306,7 +293,6 @@ jobs:
COMMIT_SHA: ${{ github.sha }}
REPO: ${{ github.repository }}
SERVER_URL: ${{ github.server_url }}
# 通知配置(按需启用)
WEBHOOK_URL: ${{ vars.NOTIFY_WEBHOOK_URL }}
run: |
echo "========================================="
@@ -324,11 +310,9 @@ jobs:
echo ""
echo "========================================="
# 发送 Webhook 通知(钉钉/企业微信/Slack 等)
if [ -n "$WEBHOOK_URL" ]; then
message="🚨 CI 部署失败\n\nPR: #$PR_NUMBER - $PR_TITLE\n分支: $SOURCE_BRANCH\n提交者: $AUTHOR\n\n请检查并决定:\n 重试 CI\n 回滚合并"
message="CI 部署失败\n\nPR: #$PR_NUMBER - $PR_TITLE\n分支: $SOURCE_BRANCH\n提交者: $AUTHOR\n\n请检查并决定:\n- 重试 CI\n- 回滚合并"
# 通用 JSON 格式(适配大多数 Webhook)
payload=$(cat <<EOF
{
"msgtype": "text",
@@ -343,7 +327,7 @@ jobs:
-H "Content-Type: application/json" \
-d "$payload" || echo "Warning: Failed to send notification"
echo "✓ Notification sent"
echo "OK: Notification sent"
else
echo "Note: Set vars.NOTIFY_WEBHOOK_URL to enable webhook notifications"
fi

27
pom.xml
View File

@@ -11,7 +11,7 @@
<groupId>com.imyeyu.timiserverapi</groupId>
<artifactId>TimiServerAPI</artifactId>
<version>1.0.0</version>
<version>1.0.1</version>
<packaging>jar</packaging>
<name>TimiServerAPI</name>
<description>imyeyu.com API</description>
@@ -23,13 +23,6 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<repositories>
<repository>
<id>apache-maven</id>
<url>https://repo.maven.apache.org/maven2/</url>
</repository>
</repositories>
<profiles>
<profile>
<id>dev-windows</id>
@@ -112,12 +105,28 @@
<configuration>
<excludeDevtools>true</excludeDevtools>
<mainClass>com.imyeyu.api.TimiServerAPI</mainClass>
<finalName>${project.artifactId}</finalName>
</configuration>
</plugin>
</plugins>
</build>
<repositories>
<repository>
<id>apache-maven</id>
<url>https://repo.maven.apache.org/maven2/</url>
</repository>
<repository>
<id>timi_nexus</id>
<url>https://nexus.imyeyu.com/repository/maven-public/</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>com.imyeyu.spring</groupId>

View File

@@ -348,10 +348,10 @@ public class ServerStatus implements TimiJava {
private String mountPoint;
/** 分区总空间 */
private long totalBytes;
private long total;
/** 分区已用空间 */
private Long usedBytes;
private Long used;
/** 磁盘传输耗时 */
private long transferTimeMs;

View File

@@ -26,17 +26,17 @@ public class DockerController {
private final DockerService dockerService;
@GetMapping("/containers")
@GetMapping("/container")
public List<DockerContainerSummaryView> listContainers() {
return dockerService.listContainers();
}
@GetMapping("/containers/{containerId}/status")
@GetMapping("/container/{containerId}/status")
public DockerContainerStatusView getContainerStatus(@PathVariable String containerId) {
return dockerService.getContainerStatus(containerId);
}
@GetMapping("/containers/{containerId}/history")
@GetMapping("/container/{containerId}/history")
public DockerContainerHistoryView getContainerHistory(@PathVariable String containerId, @RequestParam(required = false) String window) {
return dockerService.getContainerHistory(containerId, window);
}

View File

@@ -72,7 +72,6 @@ public class StatusServiceImplement implements StatusService {
SystemStatusDataView.OS os = new SystemStatusDataView.OS();
os.setName(serverStatus.getOs().getName());
os.setBootAt(serverStatus.getOs().getBootAt());
os.setUptimeMs(Math.max(0, serverTime - serverStatus.getOs().getBootAt()));
snapshot.setOs(os);
}
if (selectedMetrics.contains(Metric.CPU)) {
@@ -80,8 +79,8 @@ public class StatusServiceImplement implements StatusService {
cpu.setModel(serverStatus.getCpu().getName());
cpu.setPhysicalCores(serverStatus.getCpu().getCoreCount());
cpu.setLogicalCores(serverStatus.getCpu().getLogicalCount());
cpu.setUsagePercent(lastDouble(serverStatus.getCpu().getUsed()));
cpu.setSystemPercent(lastDouble(serverStatus.getCpu().getSystem()));
cpu.setUsageTotal(lastDouble(serverStatus.getCpu().getUsed()));
cpu.setUsageSystem(lastDouble(serverStatus.getCpu().getSystem()));
cpu.setTemperatureCelsius(serverStatus.getCpu().getTemperature());
snapshot.setCpu(cpu);
}
@@ -91,7 +90,6 @@ public class StatusServiceImplement implements StatusService {
Long swapUsedBytes = lastLong(serverStatus.getMemory().getSwapUsed());
memory.setTotalBytes(serverStatus.getMemory().getSize());
memory.setUsedBytes(usedBytes);
memory.setUsagePercent(toPercent(usedBytes, serverStatus.getMemory().getSize()));
memory.setSwapTotalBytes(serverStatus.getMemory().getSwapSize());
memory.setSwapUsedBytes(swapUsedBytes);
snapshot.setMemory(memory);
@@ -160,9 +158,9 @@ public class StatusServiceImplement implements StatusService {
item.setPartitionType(partition.getPartitionType());
item.setUuid(partition.getUuid());
item.setMountPoint(partition.getMountPoint());
item.setTotalBytes(partition.getTotalBytes());
item.setUsedBytes(partition.getUsedBytes());
item.setUsagePercent(toPercent(partition.getUsedBytes(), partition.getTotalBytes()));
item.setTotal(partition.getTotal());
item.setUsed(partition.getUsed());
item.setUsagePercent(toPercent(partition.getUsed(), partition.getTotal()));
item.setTransferTimeMs(partition.getTransferTimeMs());
storagePartitions.add(item);
}

View File

@@ -23,7 +23,7 @@ public class UpsServiceImplement implements UpsService {
private final UpsStatusTask upsStatusTask;
private final UpsStatusStore upsStatusStore;
@Value("${ups.collect-rate-ms:60000}")
@Value("${ups.collect-rate-ms:3000}")
private long collectRateMs;
@Override

View File

@@ -4,6 +4,7 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.imyeyu.api.modules.system.bean.DockerStatusStore;
import com.imyeyu.api.modules.system.util.DockerEngineClient;
import com.imyeyu.java.TimiJava;
import com.imyeyu.utils.Time;
import jakarta.validation.constraints.NotNull;
import lombok.RequiredArgsConstructor;
@@ -15,12 +16,13 @@ import org.springframework.scheduling.support.PeriodicTrigger;
import org.springframework.stereotype.Service;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* Docker 状态采集任务
* Docker 容器状态采集任务
*
* @author Codex
* @since 2026-04-06
@@ -56,19 +58,27 @@ public class DockerStatusTask implements SchedulingConfigurer {
try {
ArrayNode containers = (ArrayNode) dockerEngineClient.getJson("/containers/json", DockerEngineClient.query("all", "true"));
long now = Time.now();
synchronized (dockerStatusStore) {
Set<String> activeIds = new HashSet<>();
for (JsonNode summary : containers) {
try {
String containerId = getAsString(summary, "Id");
activeIds.add(containerId);
DockerStatusStore.Container container = dockerStatusStore.getContainers().computeIfAbsent(containerId, key -> new DockerStatusStore.Container());
updateContainerSummary(container, summary);
updateContainerInspect(containerId, container);
updateContainerStats(containerId, container, now);
} catch (Exception e) {
log.error("collect docker container item error", e);
Map<String, DockerStatusStore.Container> collectedContainers = new LinkedHashMap<>();
for (JsonNode summary : containers) {
try {
String containerId = getAsString(summary, "Id");
DockerStatusStore.Container container = new DockerStatusStore.Container();
if (TimiJava.isEmpty(container)) {
continue;
}
updateContainerSummary(container, summary);
updateContainerInspect(containerId, container);
updateContainerStats(containerId, container, now);
collectedContainers.put(container.getId(), container);
} catch (Exception e) {
log.error("collect docker container item error", e);
}
}
synchronized (dockerStatusStore) {
Set<String> activeIds = new HashSet<>(collectedContainers.keySet());
for (Map.Entry<String, DockerStatusStore.Container> item : collectedContainers.entrySet()) {
DockerStatusStore.Container container = dockerStatusStore.getContainers().computeIfAbsent(item.getKey(), key -> new DockerStatusStore.Container());
applyCollectedContainer(container, item.getValue());
}
dockerStatusStore.getContainers().entrySet().removeIf(item -> !activeIds.contains(item.getKey()));
}
@@ -77,6 +87,39 @@ public class DockerStatusTask implements SchedulingConfigurer {
}
}
private void applyCollectedContainer(DockerStatusStore.Container target, DockerStatusStore.Container source) {
target.setId(source.getId());
target.setName(source.getName());
target.setImage(source.getImage());
target.setImageId(source.getImageId());
target.setCreatedAt(source.getCreatedAt());
target.setState(source.getState());
target.setStatus(source.getStatus());
target.setHealthStatus(source.getHealthStatus());
target.setStartedAt(source.getStartedAt());
target.setFinishedAt(source.getFinishedAt());
target.setExitCode(source.getExitCode());
target.setRestartCount(source.getRestartCount());
target.setOomKilled(source.isOomKilled());
target.setCpuPercent(source.getCpuPercent());
target.setMemoryUsageBytes(source.getMemoryUsageBytes());
target.setMemoryLimitBytes(source.getMemoryLimitBytes());
target.setMemoryPercent(source.getMemoryPercent());
target.setNetworkRxBytes(source.getNetworkRxBytes());
target.setNetworkTxBytes(source.getNetworkTxBytes());
target.setBlockReadBytes(source.getBlockReadBytes());
target.setBlockWriteBytes(source.getBlockWriteBytes());
target.setPids(source.getPids());
target.setUpdatedAt(source.getUpdatedAt());
DockerStatusStore.Point point = source.getHistory().peekLast();
if (point != null) {
target.getHistory().addLast(point);
while (historyLimit < target.getHistory().size()) {
target.getHistory().pollFirst();
}
}
}
private void updateContainerSummary(DockerStatusStore.Container container, JsonNode summary) {
container.setId(getAsString(summary, "Id"));
container.setName(trimContainerName(readFirstArrayText(summary, "Names")));
@@ -108,8 +151,8 @@ public class DockerStatusTask implements SchedulingConfigurer {
if (memoryUsageBytes != null && memoryLimitBytes != null && 0 < memoryLimitBytes) {
memoryPercent = memoryUsageBytes * 100D / memoryLimitBytes;
}
Long networkRxBytes = 0L;
Long networkTxBytes = 0L;
long networkRxBytes = 0L;
long networkTxBytes = 0L;
JsonNode networks = getAsObject(stats, "networks");
if (networks != null) {
for (Map.Entry<String, JsonNode> item : (Iterable<Map.Entry<String, JsonNode>>) networks::fields) {
@@ -118,8 +161,8 @@ public class DockerStatusTask implements SchedulingConfigurer {
networkTxBytes += getAsLong(network, "tx_bytes", 0L);
}
}
Long blockReadBytes = 0L;
Long blockWriteBytes = 0L;
long blockReadBytes = 0L;
long blockWriteBytes = 0L;
JsonNode blkioStats = getAsObject(stats, "blkio_stats");
ArrayNode ioServiceBytes = blkioStats == null ? null : getAsArray(blkioStats, "io_service_bytes_recursive");
if (ioServiceBytes != null) {
@@ -134,7 +177,6 @@ public class DockerStatusTask implements SchedulingConfigurer {
}
}
Integer pids = getNestedInteger(stats, "pids_stats", "current");
container.setCpuPercent(cpuPercent);
container.setMemoryUsageBytes(memoryUsageBytes);
container.setMemoryLimitBytes(memoryLimitBytes);

View File

@@ -38,10 +38,10 @@ public class CpuStatusCollector extends AbstractDequeStatusCollector {
long irq = ticks[CentralProcessor.TickType.IRQ.getIndex()] - lastCpuTicks[CentralProcessor.TickType.IRQ.getIndex()];
long softIrq = ticks[CentralProcessor.TickType.SOFTIRQ.getIndex()] - lastCpuTicks[CentralProcessor.TickType.SOFTIRQ.getIndex()];
long steal = ticks[CentralProcessor.TickType.STEAL.getIndex()] - lastCpuTicks[CentralProcessor.TickType.STEAL.getIndex()];
long total = user + nice + sys + idle + ioWait + irq + softIrq + steal;
double total = user + nice + sys + idle + ioWait + irq + softIrq + steal;
if (0 < total) {
putDeque(context, context.getStatus().getCpu().getSystem(), 100D * sys / total);
putDeque(context, context.getStatus().getCpu().getUsed(), 100 - 100D * idle / total);
putDeque(context, context.getStatus().getCpu().getSystem(), sys / total);
putDeque(context, context.getStatus().getCpu().getUsed(), 1 - idle / total);
}
}
lastCpuTicks = ticks;

View File

@@ -38,13 +38,13 @@ public class StorageStatusCollector implements StatusCollector {
item.setPartitionType(partition.getType());
item.setUuid(partition.getUuid());
item.setMountPoint(partition.getMountPoint());
item.setTotalBytes(partition.getSize());
item.setTotal(partition.getSize());
item.setTransferTimeMs(diskStore.getTransferTime());
OSFileStore fileStore = matchFileStore(partition, fileStoreMap);
if (fileStore != null) {
fileStore.updateAttributes();
item.setUsedBytes(fileStore.getTotalSpace() - fileStore.getUsableSpace());
item.setUsed(fileStore.getTotalSpace() - fileStore.getUsableSpace());
}
context.getStatus().getStoragePartitions().add(item);
}

View File

@@ -18,6 +18,7 @@ import java.net.http.HttpResponse;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.util.LinkedHashMap;
@@ -73,6 +74,28 @@ public class DockerEngineClient {
}
}
/**
* 判断 Docker Engine 当前是否可访问。
*
* @return 可访问返回 true不可访问返回 false
*/
public boolean isAvailable() {
if (host.startsWith("unix://")) {
String socketPath = host.substring("unix://".length());
return Files.exists(Path.of(socketPath));
}
return true;
}
/**
* 获取 Docker Engine 主机配置。
*
* @return 主机配置字符串
*/
public String getHost() {
return host;
}
private String buildRequestPath(String path, Map<String, String> queryParams) {
StringBuilder builder = new StringBuilder();
builder.append("/");

View File

@@ -101,9 +101,6 @@ public class SystemStatusDataView {
/** 启动时间 */
private long bootAt;
/** 运行时长 */
private long uptimeMs;
}
/**
@@ -125,10 +122,10 @@ public class SystemStatusDataView {
private int logicalCores;
/** 总占用 */
private Double usagePercent;
private Double usageTotal;
/** 系统占用 */
private Double systemPercent;
private Double usageSystem;
/** 温度 */
private double temperatureCelsius;
@@ -149,9 +146,6 @@ public class SystemStatusDataView {
/** 已用内存 */
private Long usedBytes;
/** 使用率 */
private Double usagePercent;
/** 交换分区总量 */
private long swapTotalBytes;
@@ -364,10 +358,10 @@ public class SystemStatusDataView {
private String mountPoint;
/** 分区总空间 */
private long totalBytes;
private long total;
/** 已用空间 */
private Long usedBytes;
private Long used;
/** 使用率 */
private Double usagePercent;

View File

@@ -1,5 +1,9 @@
server:
shutdown: graceful
# 压缩
compression:
enable: true
min-response-size: 10KB
# 开发环境语言,激活开发配置时,多语言系统始终使用此语言环境
dev: