Ubuntu下Python日志管理方法
logging模块(基础配置)Python内置的logging模块是日志管理的核心工具,支持多级别日志(DEBUG/INFO/WARNING/ERROR/CRITICAL)、格式化输出和多目标输出(控制台/文件)。
通过 basicConfig 可快速设置日志级别、格式和输出文件,适用于简单场景。
import logging
# One-shot root-logger configuration: minimum level, message layout, target file.
logging.basicConfig(
    filename='app.log',   # log file path
    filemode='a',         # append mode ('w' would truncate on every start)
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.DEBUG,  # lowest emitted level (DEBUG in dev, ERROR in prod)
)

logging.debug('Debug message for diagnosis')
logging.info('Application started')
logging.error('Failed to connect to database')
import logging
from logging.handlers import RotatingFileHandler

# Named logger with two destinations: terse console output, detailed file output.
logger = logging.getLogger('my_app')
logger.setLevel(logging.DEBUG)

# Console handler: level and message only.
_console = logging.StreamHandler()
_console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))

# File handler: size-based rotation at 1 MB, keeping 3 backups.
_rotating_file = RotatingFileHandler('app.log', maxBytes=1024 * 1024, backupCount=3)
_rotating_file.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))

for _h in (_console, _rotating_file):
    logger.addHandler(_h)
当日志文件持续增长时,需通过轮转分割文件,常用工具为logging.handlers中的RotatingFileHandler(按大小)和TimedRotatingFileHandler(按时间),或系统级工具logrotate。
RotatingFileHandler 在日志文件达到 maxBytes(如1MB)时,自动创建新文件并保留 backupCount(如3个)旧文件。
from logging.handlers import RotatingFileHandler
# Size-based rotation: start a fresh file at 1 MB and keep 3 old files.
size_fmt = logging.Formatter('%(asctime)s - %(message)s')
handler = RotatingFileHandler('app.log', backupCount=3, maxBytes=1_048_576)
handler.setFormatter(size_fmt)
logger.addHandler(handler)
TimedRotatingFileHandler 按时间(如 when='midnight' 每天午夜)轮转,保留 backupCount(如7个)旧文件。
from logging.handlers import TimedRotatingFileHandler
# Time-based rotation: roll the log at midnight every day, keep 7 old files.
daily_fmt = logging.Formatter('%(asctime)s - %(message)s')
handler = TimedRotatingFileHandler('app.log', backupCount=7, interval=1, when='midnight')
handler.setFormatter(daily_fmt)
logger.addHandler(handler)
logrotate工具实现更灵活的轮转策略(如按天、压缩旧日志),适用于生产环境。
安装 logrotate:sudo apt install logrotate。创建配置文件 /etc/logrotate.d/python_app:
/var/log/python/app.log {
daily # rotate once per day
rotate 7 # keep 7 rotated files
compress # gzip old logs (e.g. app.log.1.gz)
missingok # no error if the log file is missing
notifempty # skip rotation when the file is empty
copytruncate # copy then truncate in place, so the app keeps its open file handle (no restart needed)
}
sudo logrotate -f /etc/logrotate.d/python_app
第三方库如 Loguru 提供了更简洁的 API,支持自动轮转、异步日志和结构化日志,适合快速开发。
通过 logger.add 配置日志输出,支持自动轮转和压缩。
from loguru import logger
# One call configures the sink: rotate at 1 MB, keep 7 days, zip old files.
logger.add("app.log", compression="zip", retention="7 days", rotation="1 MB")
logger.debug("This is a debug message")  # no getLogger/handler boilerplate needed
logger.error("An error occurred!")
在分布式系统中,可将Python日志发送到ELK Stack(Elasticsearch+Logstash+Kibana),实现集中存储、搜索和可视化。
# Install the ELK stack (Elasticsearch + Logstash + Kibana).
# One Elastic APT repository entry covers all three packages — the original
# appended the identical "deb" line into three separate sources.list.d files
# with `tee -a`, which duplicates entries on every re-run.
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -  # NOTE(review): apt-key is deprecated on newer Ubuntu; prefer a signed-by keyring
echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-7.x.list  # plain tee: idempotent, no duplicate lines
sudo apt update
sudo apt install elasticsearch logstash kibana
创建 Logstash 配置文件 /etc/logstash/conf.d/python.conf:
input {
file {
path => "/var/log/python/app.log" # path of the Python log file to tail
start_position => "beginning" # read from the start of the file
sincedb_path => "/dev/null" # discard read-position state (testing only; re-reads everything on restart)
}
}
output {
elasticsearch {
hosts => ["localhost:9200"] # Elasticsearch address
index => "python-logs" # target index name
}
stdout { codec => rubydebug } # also print parsed events to the console for debugging
}
# Start the three ELK services.
sudo systemctl start elasticsearch logstash kibana
访问 http://localhost:5601,配置索引模式 python-logs-*,即可可视化日志数据。日志级别建议:开发环境用 DEBUG(详细信息),生产环境用 ERROR(仅关键错误),避免日志量过大。
import json
import json
import logging
from datetime import datetime, timezone

# Structured (JSON) logging: each log line is a machine-parseable JSON object.
logger = logging.getLogger('structured_logger')
logger.setLevel(logging.INFO)
handler = logging.FileHandler('structured.log')
# Emit the message verbatim — the message itself is already a JSON string.
handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(handler)

log_entry = {
    # Real event time (UTC, ISO-8601) instead of a hard-coded example value.
    'timestamp': datetime.now(timezone.utc).isoformat(),
    'level': 'INFO',
    'message': 'User logged in',
    'user_id': 123,
    'ip': '192.168.1.1'
}
# ensure_ascii=False keeps non-ASCII field values human-readable in the log file.
logger.info(json.dumps(log_entry, ensure_ascii=False))
安全与维护建议:限制日志文件权限(如 chmod 640 app.log),避免敏感信息泄露;通过 logrotate 或脚本自动删除超过7天的旧日志,节省存储空间。