Python logging: multiprocess logging for production that does not produce duplicate output

#coding=utf-8
import platform


import os
import time
import errno

PF_YX = (platform.platform()).split("-")[0]
G_LOG_FILE = "/home/sy/workspace/server/lzserver/game/common/log/game.log"
LOCK_FILE = "/home/sy/workspace/server/lzserver/game/common/log/gamelock"
LOCK_DIR = "/home/sy/workspace/server/lzserver/game/common/log/.lock"


from logging import StreamHandler, FileHandler
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
import fcntl, re, shutil
from stat import ST_MTIME


class StreamHandler_MP(StreamHandler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream.  Used with multiple processes.
    """

    def emit(self, record):
        """
        Emit a record.

        Seek to the end of the stream first, so that multiple processes
        logging to the same file do not clobber one another's offsets.
        """
        try:
            if hasattr(self.stream, "seek"):
                self.stream.seek(0, os.SEEK_END)
        except IOError:
            pass

        StreamHandler.emit(self, record)

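# Note on the seek(0, SEEK_END) above: doRollover() below reopens the stream
# with mode 'w', which drops O_APPEND semantics, so each process then keeps
# its own file offset.  Re-seeking to the end before every emit keeps a
# process that has been idle from overwriting bytes appended by its siblings.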

class FileHandler_MP(FileHandler, StreamHandler_MP):
    """
    A handler class which writes formatted logging records to disk files,
    for use with multiple processes.
    """
    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler_MP.emit(self, record)


class RotatingFileHandler_MP(RotatingFileHandler, FileHandler_MP):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    
    Based on logging.RotatingFileHandler, modified for Multiprocess
    """
    _lock_dir = LOCK_DIR
    if not os.path.exists(_lock_dir):
        os.mkdir(_lock_dir)

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        For multiprocess, we use shutil.copy instead of rename.
        """

        if self.stream:
            self.stream.close()
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    shutil.copy(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            if os.path.exists(self.baseFilename):
                shutil.copy(self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        
    
    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().

        For multiprocess, we serialize writes with a per-level flock file.
        Any better method?
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            # One lock file per log file and level; the local name is chosen
            # so it does not shadow the FileLock class defined further down.
            lock_path = self._lock_dir + '/' + os.path.basename(self.baseFilename) + '.' + record.levelname
            f = open(lock_path, "w+")
            try:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX)
                FileHandler_MP.emit(self, record)
            finally:
                # Release the lock even if emit fails, then let handleError
                # report the problem.
                fcntl.flock(f.fileno(), fcntl.LOCK_UN)
                f.close()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
    
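# A minimal usage sketch (assumption: Linux, since the handlers rely on
# fcntl; the maxBytes/backupCount values are illustrative).  Any number of
# worker processes can attach a RotatingFileHandler_MP pointing at the same
# file; the per-level lock files under LOCK_DIR serialize their writes:
#
#     import logging
#     logger = logging.getLogger("worker")
#     hdlr = RotatingFileHandler_MP(G_LOG_FILE, maxBytes=50 * 1024 * 1024,
#                                   backupCount=10)
#     hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
#     logger.addHandler(hdlr)
#     logger.setLevel(logging.DEBUG)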
        
class TimedRotatingFileHandler_MP(TimedRotatingFileHandler, FileHandler_MP):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    _lock_dir = LOCK_DIR
    if not os.path.exists(_lock_dir):
        os.mkdir(_lock_dir)
    
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0):
        FileHandler_MP.__init__(self, filename, 'a', encoding, delay)
        self.encoding = encoding
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        
        if interval != 1:
            raise ValueError("Invalid rollover interval, this handler only supports interval=1")


    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same.

        Instead of tracking a precomputed rollover time in memory, compare the
        current time with the log file's mtime, so every process reaches the
        same decision independently.
        """
        if not os.path.exists(self.baseFilename):
            # Nothing to roll over yet.
            return 0

        cTime = time.localtime(time.time())
        mTime = time.localtime(os.stat(self.baseFilename)[ST_MTIME])
        if self.when == "S" and cTime[5] != mTime[5]:
            return 1
        elif self.when == 'M' and cTime[4] != mTime[4]:
            return 1
        elif self.when == 'H' and cTime[3] != mTime[3]:
            return 1
        elif (self.when == 'MIDNIGHT' or self.when == 'D') and cTime[2] != mTime[2]:
            return 1
        elif self.when.startswith('W') and time.strftime("%W", cTime) != time.strftime("%W", mTime):
            # Weekly rollover ('W0'..'W6'): compare week-of-year numbers.
            return 1
        else:
            return 0
    

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        
        For multiprocess, we use shutil.copy instead of rename.
        """
        if self.stream:
            self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        #t = self.rolloverAt - self.interval
        t = int(time.time())
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        if os.path.exists(self.baseFilename):
            shutil.copy(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log files and delete them
            for s in self.getFilesToDelete():
                os.remove(s)
        self.mode = 'w'
        self.stream = self._open()
    
    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().

        For multiprocess, we serialize writes with a per-level flock file.
        Any better method?
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            lock_path = self._lock_dir + '/' + os.path.basename(self.baseFilename) + '.' + record.levelname
            f = open(lock_path, "w+")
            try:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX)
                FileHandler_MP.emit(self, record)
            finally:
                fcntl.flock(f.fileno(), fcntl.LOCK_UN)
                f.close()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
        
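# Because the flock file name includes record.levelname, writes are only
# serialized per level: an INFO record and an ERROR record from two processes
# take different locks and may still interleave.  A sketch of attaching the
# timed variant (the when/backupCount values are illustrative):
#
#     import logging
#     logger = logging.getLogger("timed")
#     hdlr = TimedRotatingFileHandler_MP(G_LOG_FILE, when='H', interval=1,
#                                        backupCount=24)
#     logger.addHandler(hdlr)
#     logger.setLevel(logging.INFO)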
 
class FileLockException(Exception):
    pass
 
class FileLock(object):
    """ A file locking mechanism that has context-manager support so 
        you can use it in a with statement. This should be relatively cross
        compatible as it doesn't rely on msvcrt or fcntl for the locking.
    """
 
    def __init__(self, file_name, timeout=10, delay=.05):
        """ Prepare the file locker. Specify the file to lock and optionally
            the maximum timeout and the delay between each attempt to lock.
        """
        self.is_locked = False
        self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
        self.file_name = file_name
        self.timeout = timeout
        self.delay = delay
 
 
    def acquire(self):
        """ Acquire the lock, if possible. If the lock is in use, it checks
            again every `delay` seconds. It does this until it either gets
            the lock or exceeds `timeout` seconds, in which case it throws
            an exception.
        """
        start_time = time.time()
        while True:
            try:
                # Open the lock file exclusively; O_EXCL makes this fail
                # if another process has already created it.
                self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)
                break
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                if (time.time() - start_time) >= self.timeout:
                    raise FileLockException("Timeout occurred.")
                time.sleep(self.delay)
        self.is_locked = True
 
 
    def release(self):
        """ Get rid of the lock by deleting the lockfile.
            When working in a `with` statement, this gets automatically
            called at the end.
        """
        # Close the file descriptor and remove the lockfile.
        if self.is_locked:
            os.close(self.fd)
            os.unlink(self.lockfile)
            self.is_locked = False
 
 
    def __enter__(self):
        """ Activated when used in the with statement. 
            Should automatically acquire a lock to be used in the with block.
        """
        if not self.is_locked:
            self.acquire()
        return self
 
 
    def __exit__(self, type, value, traceback):
        """ Activated at the end of the with statement.
            It automatically releases the lock if it is still held.
        """
        if self.is_locked:
            self.release()
 
 
    def __del__(self):
        """ Make sure that the FileLock instance doesn't leave a lockfile
            lying around.
        """
        self.release()
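# A minimal usage sketch of FileLock (the path is illustrative).  Because the
# lock is an O_EXCL lockfile on disk, it also coordinates unrelated processes:
#
#     with FileLock("/tmp/shared_resource"):
#         pass  # exactly one process at a time runs this block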


class Dadian(object):
    """ No-op stand-in used on Windows, where the fcntl-based handlers are
        unavailable. """

    def err(self, msg):
        pass

    def info(self, msg):
        pass

    def debug(self, msg):
        pass
        
class LDadian(object):
    """ Wraps a logger so that every info() call is guarded by the
        cross-process FileLock and never raises into the caller. """

    def __init__(self, logger):
        self.logger = logger

    def info(self, *args, **kwargs):
        with FileLock(LOCK_FILE):
            try:
                self.logger.info(*args, **kwargs)
            except BaseException:
                pass
def initlog1(LOG_FILE=G_LOG_FILE):
    """
    @des: process- and thread-safe -- runs on Linux only.  (The commented-out
        ConcurrentRotatingFileHandler alternative requires
        `pip install ConcurrentLogHandler`.)
    """
    import logging
    #from cloghandler import ConcurrentRotatingFileHandler
    # Get (or create) the named logger.  getLogger returns the same object on
    # every call, so only attach a handler the first time; otherwise each call
    # would add another handler and every record would be written once per
    # handler -- the duplicate-output problem this module is meant to avoid.
    logger = logging.getLogger("dadian")
    if not logger.handlers:
        # logging ships many handlers (FileHandler, SocketHandler, SMTPHandler,
        # ...); here we want a file with time-based rotation.  interval
        # defaults to 1, so when='midnight' splits the log once a day.
        hdlr = TimedRotatingFileHandler_MP(LOG_FILE, when='midnight', interval=1, backupCount=100)
        #hdlr = ConcurrentRotatingFileHandler(LOG_FILE, "a", maxBytes=1024*1024*50, backupCount=100)
        # Without a formatter the default format is plain "%(message)s": no
        # date, no level.  Log time, logger name, level and message instead.
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
    # Records below the logger's level are dropped; the default is 30
    # (WARNING), so set DEBUG explicitly to let everything through.
    logger.setLevel(logging.DEBUG)
    ldadian = LDadian(logger)
    return ldadian
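# Why the `if not logger.handlers` guard matters -- a minimal sketch with a
# hypothetical logger name, for illustration only:
#
#     import logging
#     for _ in range(2):
#         lg = logging.getLogger("demo")
#         lg.addHandler(logging.StreamHandler())  # 2nd pass adds a 2nd handler
#     lg.warning("hi")                            # -> printed twice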


if PF_YX=='Windows':
    dadian = Dadian()
else:
    dadian = initlog1()

if __name__=="__main__":
    dadian.info("1 2")
