refactor(llm): change debug log level to info log level

- Changed multiple log statements from debug to info level; these messages are now emitted at the default log level, so debug logging no longer needs to be enabled, reducing overall log output
- Optimized the logging strategy, improving system performance and readability
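For reference, a minimal sketch of the behavioural difference this change relies on, assuming SLF4J via Lombok's @Slf4j and a root logger level of INFO (the common Spring Boot default); the class and messages below are illustrative only and not part of the committed code:

import lombok.extern.slf4j.Slf4j;

@Slf4j
public class LogLevelDemo {
    public static void main(String[] args) {
        // Suppressed when the effective level is INFO (the old behaviour of the changed statements)
        log.debug("debug message: hidden at INFO");
        // Emitted when the effective level is INFO (the behaviour after this commit)
        log.info("info message: visible at INFO");
    }
}

With the root level at INFO, the statements promoted in this commit become visible without turning on debug output for the whole service.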
Liuyang 2025-03-12 09:44:58 +08:00
parent 8ef14cd75b
commit 49d11cce34


@@ -58,7 +58,7 @@ public class BaseModelTaskService {
 log.info("Starting to sync base model information...");
 // Fetch the list of all base models
-log.debug("Querying the list of all base models...");
+log.info("Querying the list of all base models...");
 List<BaseModelDO> baseModelList = baseModelService.getBaseModelList();
 log.info("Successfully queried {} base models.", baseModelList.size());
@@ -66,25 +66,25 @@ public class BaseModelTaskService {
 for (BaseModelDO baseModelDO : baseModelList) {
 Long modelId = baseModelDO.getModelId();
 Long gpuId = baseModelDO.getGpuId();
-log.debug("Processing base model, model ID: {}, GPU ID: {}", modelId, gpuId);
+log.info("Processing base model, model ID: {}, GPU ID: {}", modelId, gpuId);
 // Query the GPU server information
-log.debug("Querying GPU server information, GPU ID: {}", gpuId);
+log.info("Querying GPU server information, GPU ID: {}", gpuId);
 ServerNameDO serverName = serverNameMapper.selectById(gpuId);
 if (serverName == null) {
 log.error("GPU server information not found, GPU ID: {}", gpuId);
 continue;
 }
-log.debug("GPU server information queried successfully. Host: {}", serverName.getHost());
+log.info("GPU server information queried successfully. Host: {}", serverName.getHost());
 // Build the query parameters and query the model deployment information
 String query = "?filter={\"id\":" + modelId + "}";
-log.debug("Querying model deployment information, query parameters: {}", query);
+log.info("Querying model deployment information, query parameters: {}", query);
 String res = fineTuningTaskHttpService.modelTableQuery(new HashMap<>(), serverName.getHost(), "model_deploy", query);
-log.debug("Model deployment information queried successfully. Response: {}", res);
+log.info("Model deployment information queried successfully. Response: {}", res);
 // Parse the response
-log.debug("Parsing model deployment information...");
+log.info("Parsing model deployment information...");
 ObjectMapper mapper = new ObjectMapper();
 mapper.registerModule(new JavaTimeModule());
 SimpleModule module = new SimpleModule();
@@ -92,12 +92,12 @@ public class BaseModelTaskService {
 mapper.registerModule(module);
 List<AigcModelDeployVO> aigcModelDeploys = mapper.readValue(res, new TypeReference<List<AigcModelDeployVO>>() {
 });
-log.debug("Model deployment information parsed. Record count: {}", aigcModelDeploys.size());
+log.info("Model deployment information parsed. Record count: {}", aigcModelDeploys.size());
 if (!aigcModelDeploys.isEmpty()) {
 AigcModelDeployVO latestRecord = aigcModelDeploys.get(0);
 String status = latestRecord.getStatus();
-log.debug("Latest model deployment record status: {}", status);
+log.info("Latest model deployment record status: {}", status);
 // If the model status is "stop", redeploy it
 if ("stop".equals(status)) {
@@ -106,12 +106,12 @@ public class BaseModelTaskService {
 // Build the model deployment request
 AigcModelDeploySaveReq aigcModelDeploySaveReq = new AigcModelDeploySaveReq(
 baseModelDO.getAigcModelName(), "gpu");
-log.debug("Model deployment request parameters: {}", JSON.toJSONString(aigcModelDeploySaveReq));
+log.info("Model deployment request parameters: {}", JSON.toJSONString(aigcModelDeploySaveReq));
 // Send the model deployment request
 ModelDeployRespVO modelDeployRespVO = trainHttpService.modelDeploy(
 new HashMap<>(), serverName.getHost(), aigcModelDeploySaveReq);
-log.debug("Model deployment request completed. Response: {}", JSON.toJSONString(modelDeployRespVO));
+log.info("Model deployment request completed. Response: {}", JSON.toJSONString(modelDeployRespVO));
 // Update the base model information
 if (!"error".equals(modelDeployRespVO.getMessage())) {
@@ -247,7 +247,7 @@ public class BaseModelTaskService {
 localModel.setApiUrl(string1);
 localModel.setModelId((long) pedestalModelVo.getId());
 baseModelService.updetatebyId(localModel);
-log.debug("Model {} status is running, no update needed", pedestalModelVo.getDeploymentName());
+log.info("Model {} status is running, no update needed", pedestalModelVo.getDeploymentName());
 }
 }
 // else {
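The parsing step in the hunks above reads the deployment list with Jackson's ObjectMapper and a TypeReference. A self-contained sketch of the same pattern, where DeployRecord and the inline JSON are hypothetical stand-ins for AigcModelDeployVO and the real response:

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import java.util.List;

public class DeployParseDemo {
    // Hypothetical stand-in for AigcModelDeployVO
    public static class DeployRecord {
        public long id;
        public String status;
    }

    public static void main(String[] args) throws Exception {
        String res = "[{\"id\":1,\"status\":\"stop\"},{\"id\":2,\"status\":\"running\"}]";
        ObjectMapper mapper = new ObjectMapper();
        // Registered as in the service, in case the payload carries java.time fields
        mapper.registerModule(new JavaTimeModule());
        List<DeployRecord> records = mapper.readValue(res, new TypeReference<List<DeployRecord>>() {});
        System.out.println(records.get(0).status); // prints "stop"
    }
}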