[Feature]: Refactor logger and add package info. (#332)
* Add package info.

* fix raise error

* replace logger

* add unit test

* add unit test

* update test
RangiLyu committed Oct 24, 2021
1 parent d7caaca commit 0f32d4a
Showing 10 changed files with 217 additions and 86 deletions.
24 changes: 24 additions & 0 deletions nanodet/__about__.py
@@ -0,0 +1,24 @@
import time

_this_year = time.strftime("%Y")
__version__ = "0.4.2"
__author__ = "RangiLyu"
__author_email__ = "[email protected]"
__license__ = "Apache-2.0"
__copyright__ = f"Copyright (c) 2020-{_this_year}, {__author__}."
__homepage__ = "https://github.com/RangiLyu/nanodet"

__docs__ = (
"NanoDet: Deep learning object detection toolbox for super fast and "
"lightweight anchor-free object detection models."
)

__all__ = [
"__author__",
"__author_email__",
"__copyright__",
"__docs__",
"__homepage__",
"__license__",
"__version__",
]
8 changes: 8 additions & 0 deletions nanodet/__init__.py
@@ -0,0 +1,8 @@
"""package info."""

import os

from nanodet.__about__ import * # noqa: F401 F403

_PACKAGE_ROOT = os.path.dirname(__file__)
_PROJECT_ROOT = os.path.dirname(_PACKAGE_ROOT)
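
With the metadata centralized in nanodet/__about__.py and re-exported from the package root, version and author information can be read programmatically. A minimal sketch, assuming nanodet is importable (installed or on sys.path):

import nanodet

print(nanodet.__version__)  # "0.4.2"
print(nanodet.__author__)   # "RangiLyu"
print(nanodet.__docs__)     # one-line package description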
73 changes: 9 additions & 64 deletions nanodet/trainer/task.py
@@ -14,7 +14,6 @@

import copy
import json
import logging
import os
import warnings
from typing import Any, List
@@ -44,8 +43,7 @@ def __init__(self, cfg, evaluator=None):
self.model = build_model(cfg.model)
self.evaluator = evaluator
self.save_flag = -10
self.log_style = "NanoDet" # Log style. Choose between 'NanoDet' or 'Lightning'
# TODO: use callback to log
self.log_style = "NanoDet"

def _preprocess_batch_input(self, batch):
batch_imgs = batch["img"]
@@ -75,27 +73,7 @@ def training_step(self, batch, batch_idx):
preds, loss, loss_states = self.model.forward_train(batch)

# log train losses
if self.log_style == "Lightning":
self.log(
"lr",
self.optimizers().param_groups[0]["lr"],
on_step=True,
on_epoch=False,
prog_bar=True,
)
for k, v in loss_states.items():
self.log(
"Train/" + k,
v,
on_step=True,
on_epoch=True,
prog_bar=True,
sync_dist=True,
)
elif (
self.log_style == "NanoDet"
and self.global_step % self.cfg.log.interval == 0
):
if self.global_step % self.cfg.log.interval == 0:
lr = self.optimizers().param_groups[0]["lr"]
log_msg = "Train|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format(
self.current_epoch + 1,
@@ -115,7 +93,7 @@ def training_step(self, batch, batch_idx):
loss_states[loss_name].mean().item(),
self.global_step,
)
self.info(log_msg)
self.logger.info(log_msg)

return loss

@@ -127,25 +105,7 @@ def validation_step(self, batch, batch_idx):
batch = self._preprocess_batch_input(batch)
preds, loss, loss_states = self.model.forward_train(batch)

if self.log_style == "Lightning":
self.log(
"Val/loss",
loss,
on_step=True,
on_epoch=False,
prog_bar=True,
logger=False,
)
for k, v in loss_states.items():
self.log(
"Val/" + k,
v,
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
elif self.log_style == "NanoDet" and batch_idx % self.cfg.log.interval == 0:
if batch_idx % self.cfg.log.interval == 0:
lr = self.optimizers().param_groups[0]["lr"]
log_msg = "Val|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format(
self.current_epoch + 1,
Expand All @@ -158,7 +118,7 @@ def validation_step(self, batch, batch_idx):
log_msg += "{}:{:.4f}| ".format(
loss_name, loss_states[loss_name].mean().item()
)
self.info(log_msg)
self.logger.info(log_msg)

dets = self.model.head.post_process(preds, batch)
return dets
@@ -203,23 +163,9 @@ def validation_epoch_end(self, validation_step_outputs):
warnings.warn(
"Warning! Save_key is not in eval results! Only save model last!"
)
if self.log_style == "Lightning":
for k, v in eval_results.items():
self.log(
"Val_metrics/" + k,
v,
on_step=False,
on_epoch=True,
prog_bar=False,
sync_dist=True,
)
elif self.log_style == "NanoDet":
for k, v in eval_results.items():
self.scalar_summary(
"Val_metrics/" + k, "Val", v, self.current_epoch + 1
)
self.logger.log_metrics(eval_results, self.current_epoch + 1)
else:
self.info("Skip val on rank {}".format(self.local_rank))
self.logger.info("Skip val on rank {}".format(self.local_rank))

def test_step(self, batch, batch_idx):
dets = self.predict(batch, batch_idx)
@@ -248,7 +194,7 @@ def test_epoch_end(self, test_step_outputs):
for k, v in eval_results.items():
f.write("{}: {}\n".format(k, v))
else:
self.info("Skip test on rank {}".format(self.local_rank))
self.logger.info("Skip test on rank {}".format(self.local_rank))

def configure_optimizers(self):
"""
@@ -343,5 +289,4 @@ def scalar_summary(self, tag, phase, value, step):
self.logger.experiment.add_scalars(tag, {phase: value}, step)

def info(self, string):
if self.local_rank < 1:
logging.info(string)
self.logger.info(string)
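
The net effect in task.py: the Lightning-style self.log(...) branches and the module-level logging are gone, and every message funnels through self.logger, which Lightning resolves via the attached trainer. A minimal sketch of the resulting pattern (simplified names, not the full TrainingTask):

# Sketch: logging delegates to the trainer's logger, which is
# rank-zero-guarded internally, so the task drops its own rank check.
class Task:
    def __init__(self, logger):
        self.logger = logger  # e.g. a NanoDetLightningLogger

    def info(self, string):
        self.logger.info(string)  # @rank_zero_only inside the logger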
3 changes: 2 additions & 1 deletion nanodet/util/__init__.py
@@ -4,7 +4,7 @@
from .data_parallel import DataParallel
from .distributed_data_parallel import DDP
from .flops_counter import get_model_complexity_info
from .logger import AverageMeter, Logger, MovingAverage
from .logger import AverageMeter, Logger, MovingAverage, NanoDetLightningLogger
from .misc import images_to_levels, multi_apply, unmap
from .path import collect_files, mkdir
from .rank_filter import rank_filter
@@ -37,4 +37,5 @@
"Visualizer",
"overlay_bbox_cv",
"collect_files",
"NanoDetLightningLogger",
]
118 changes: 117 additions & 1 deletion nanodet/util/logger.py
@@ -14,8 +14,13 @@

import logging
import os
import time

import numpy as np
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.loggers.base import rank_zero_experiment
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.cloud_io import get_filesystem
from termcolor import colored

from .path import mkdir
@@ -50,7 +55,7 @@ def __init__(self, local_rank, save_dir="./", use_tensorboard=True):
'Please run "pip install future tensorboard" to install '
"the dependencies to use torch.utils.tensorboard "
"(applicable to PyTorch 1.1 or higher)"
)
) from None
if self.rank < 1:
logging.info(
"Using Tensorboard, logs will be saved in {}".format(self.log_dir)
@@ -103,3 +108,114 @@ def update(self, val, n=1):
self.count += n
if self.count > 0:
self.avg = self.sum / self.count


class NanoDetLightningLogger(LightningLoggerBase):
def __init__(self, save_dir="./", **kwargs):
super().__init__()
self._name = "NanoDet"
self._version = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
self.log_dir = os.path.join(save_dir, f"logs-{self._version}")

self._fs = get_filesystem(save_dir)
self._fs.makedirs(self.log_dir, exist_ok=True)
self._init_logger()

self._experiment = None
self._kwargs = kwargs

@property
def name(self):
return self._name

@property
@rank_zero_experiment
def experiment(self):
r"""
Actual tensorboard object. To use TensorBoard features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
self.logger.experiment.some_tensorboard_function()
"""
if self._experiment is not None:
return self._experiment

assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0"

try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ImportError(
'Please run "pip install future tensorboard" to install '
"the dependencies to use torch.utils.tensorboard "
"(applicable to PyTorch 1.1 or higher)"
) from None

self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
return self._experiment

@property
def version(self):
return self._version

@rank_zero_only
def _init_logger(self):
self.logger = logging.getLogger(name=self.name)
self.logger.setLevel(logging.INFO)

# create file handler
fh = logging.FileHandler(os.path.join(self.log_dir, "logs.txt"))
fh.setLevel(logging.INFO)
# set file formatter
f_fmt = "[%(name)s][%(asctime)s]%(levelname)s: %(message)s"
file_formatter = logging.Formatter(f_fmt, datefmt="%m-%d %H:%M:%S")
fh.setFormatter(file_formatter)

# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# set console formatter
c_fmt = (
colored("[%(name)s]", "magenta", attrs=["bold"])
+ colored("[%(asctime)s]", "blue")
+ colored("%(levelname)s:", "green")
+ colored("%(message)s", "white")
)
console_formatter = logging.Formatter(c_fmt, datefmt="%m-%d %H:%M:%S")
ch.setFormatter(console_formatter)

# add the handlers to the logger
self.logger.addHandler(fh)
self.logger.addHandler(ch)

@rank_zero_only
def info(self, string):
self.logger.info(string)

@rank_zero_only
def dump_cfg(self, cfg_node):
with open(os.path.join(self.log_dir, "train_cfg.yml"), "w") as f:
cfg_node.dump(stream=f)

@rank_zero_only
def log_hyperparams(self, params):
self.logger.info(f"hyperparams: {params}")

@rank_zero_only
def log_metrics(self, metrics, step):
self.logger.info(f"Val_metrics: {metrics}")
for k, v in metrics.items():
self.experiment.add_scalars("Val_metrics/" + k, {"Val": v}, step)

@rank_zero_only
def save(self):
super().save()

@rank_zero_only
def finalize(self, status):
self.experiment.flush()
self.experiment.close()
self.save()
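
Typical wiring for the new logger: construct it, call info()/dump_cfg() directly, and pass it to a pytorch_lightning Trainer; the experiment property then lazily creates a TensorBoard SummaryWriter on rank zero only. A hedged sketch of standard usage (the Trainer arguments are illustrative, not from this diff):

import pytorch_lightning as pl
from nanodet.util import NanoDetLightningLogger

logger = NanoDetLightningLogger(save_dir="./workspace")  # creates workspace/logs-<timestamp>/
logger.info("starting training")  # goes to console and logs.txt on rank 0
trainer = pl.Trainer(logger=logger, max_epochs=1)
# trainer.fit(task, train_dataloader, val_dataloader)  # task and loaders assumed defined elsewhere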
10 changes: 5 additions & 5 deletions setup.py
@@ -1,16 +1,16 @@
#!/usr/bin/env python
from setuptools import find_packages, setup

__version__ = "0.4.0"
from nanodet import __author__, __author_email__, __docs__, __homepage__, __version__

if __name__ == "__main__":
setup(
name="nanodet",
version=__version__,
description="Deep Learning Object Detection Toolbox",
url="https://github.com/RangiLyu/nanodet",
author="RangiLyu",
author_email="[email protected]",
description=__docs__,
url=__homepage__,
author=__author__,
author_email=__author_email__,
keywords="deep learning",
packages=find_packages(exclude=("config", "tools", "demo")),
classifiers=[
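
setup.py previously pinned __version__ = "0.4.0" while the package had moved on; importing the metadata from nanodet makes the package the single source of truth. The trade-off, worth flagging, is that setup.py must now be able to import nanodet, which stays cheap here because nanodet/__init__.py pulls in only os and __about__. A quick consistency check, assuming an editable install (pip install -e .):

from nanodet import __author__, __homepage__, __version__

assert __version__ == "0.4.2"
print(f"nanodet {__version__} by {__author__} ({__homepage__})")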
11 changes: 7 additions & 4 deletions tests/test_trainer/test_lightning_task.py
@@ -1,16 +1,19 @@
import tempfile

import numpy as np
import torch
import torch.nn as nn

from nanodet.trainer.task import TrainingTask
from nanodet.util import cfg, load_config
from nanodet.util import NanoDetLightningLogger, cfg, load_config


class DummyModule(nn.Module):
class DummyTrainer(nn.Module):
current_epoch = 0
global_step = 0
local_rank = 0
use_ddp = False
logger = NanoDetLightningLogger(tempfile.TemporaryDirectory().name)

def save_checkpoint(self, *args, **kwargs):
pass
@@ -21,13 +24,14 @@ def __init__(self, task):
self.task = task

def test(self):
self.task.trainer = DummyTrainer()

optimizer = self.task.configure_optimizers()

def optimizers():
return optimizer

self.task.optimizers = optimizers
# self.task.trainer = DummyModule()

self.task.on_train_start()
assert self.task.current_epoch == 0
@@ -56,7 +60,6 @@ def func(*args, **kwargs):
self.task.scalar_summary = func
self.task.training_step(dummy_batch, 0)

self.task.trainer = DummyModule()
self.task.optimizer_step(optimizer=optimizer)
self.task.training_epoch_end([])
assert self.task.lr_scheduler.last_epoch == 1
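
The test stubs a trainer because Lightning's LightningModule.logger property resolves through the trainer (roughly: return self.trainer.logger if self.trainer else None), so the new self.logger.info(...) calls in the task would otherwise fail. A short sketch of that resolution, under the assumption that the task behaves like a standard LightningModule:

task = TrainingTask(cfg)                  # hypothetical: cfg loaded as in the test
task.trainer = DummyTrainer()             # carries a NanoDetLightningLogger
task.logger.info("resolved via trainer")  # routes to rank-0 console + logs.txt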