Commit 391743ad authored by Frisinghelli Daniel

Implemented python logging module

parent 4338ddfb
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 10:07:12 2020
@author: Daniel
"""
# the logging configuration dictionary
def log_conf(logfile):

    LOGGING_CONFIG = {
        'version': 1,
        'disable_existing_loggers': False,

        'formatters': {
            'brief': {
                'format': '%(name)s: %(message)s'
                },
            'standard': {
                'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
                },
            },

        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'brief',
                'level': 'INFO',
                'stream': 'ext://sys.stderr',
                },
            'file': {
                'class': 'logging.FileHandler',
                'formatter': 'standard',
                'level': 'INFO',
                'filename': logfile,
                'mode': 'w'
                }
            },

        'loggers': {
            '': {
                'handlers': ['console', 'file'],
                'level': 'INFO',
                },
            }
        }

    return LOGGING_CONFIG
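
The helper only builds the dictionary; it still has to be passed to the standard library's dictConfig to take effect. Below is a minimal sketch of how the configuration might be applied in an entry-point script; the import path pysegcnn.core.logging and the log file name are assumptions, since the new file's location is not shown on this page.

import logging
import logging.config

# hypothetical import path for the helper defined above
from pysegcnn.core.logging import log_conf

# apply the configuration to the root logger ('')
logging.config.dictConfig(log_conf('pysegcnn.log'))

# module-level loggers now propagate to the console and file handlers
LOGGER = logging.getLogger(__name__)
LOGGER.info('Logging configured.')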
@@ -8,6 +8,7 @@ Created on Fri Jun 26 16:31:36 2020
 # builtins
 import os
 import enum
+import logging

 # externals
 import numpy as np
@@ -19,6 +20,9 @@ import torch.optim as optim
 from pysegcnn.core.layers import (Encoder, Decoder, Conv2dPool, Conv2dUnpool,
                                   Conv2dUpsample, Conv2dSame)

+# module level logger
+LOGGER = logging.getLogger(__name__)
+

 class Network(nn.Module):
@@ -43,7 +47,10 @@ class Network(nn.Module):
         # initialize dictionary to store network parameters
         model_state = {}

-        # store input bands
+        # store model name
+        model_state['cls'] = self.__class__
+
+        # store the bands the model was trained with
         model_state['bands'] = bands

         # store construction parameters to instanciate the network
@@ -67,7 +74,7 @@ class Network(nn.Module):
         # model state dictionary stores the values of all trainable parameters
         state = os.path.join(outpath, state_file)
         torch.save(model_state, state)
-        print('Network parameters saved in {}'.format(state))
+        LOGGER.info('Network parameters saved in {}'.format(state))

         return state
@@ -79,13 +86,13 @@ class Network(nn.Module):
         model_state = torch.load(state)

         # resume network parameters
-        print('Loading network parameters from {} ...'.format(state))
+        LOGGER.info('Loading model parameters from {} ...'.format(state))
         self.load_state_dict(model_state['model_state_dict'])
         self.epoch = model_state['epoch']

         # resume optimizer parameters
         if optimizer is not None:
-            print('Loading optimizer parameters from {} ...'.format(state))
+            LOGGER.info('Loading optimizer parameters from {} ...'.format(state))
             optimizer.load_state_dict(model_state['optim_state_dict'])

         return state
...
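
Each changed module follows the same idiom: a module-level logger named after the module, with no handlers of its own, replacing the former print calls. A minimal sketch of the pattern, assuming the root logger has already been configured with log_conf as shown above (save_checkpoint is a placeholder function used only for illustration):

import logging

# module-level logger; __name__ resolves to the dotted module path
# (e.g. 'pysegcnn.core.models') and appears in the 'standard' formatter
LOGGER = logging.getLogger(__name__)


def save_checkpoint(path):
    # records propagate to the root logger's console and file handlers,
    # so no handler needs to be attached here
    LOGGER.info('Network parameters saved in {}'.format(path))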
 # builtins
 import os
 import pathlib
+import logging

 # externals
 import numpy as np
@@ -14,6 +15,9 @@ from pysegcnn.core.utils import reconstruct_scene, accuracy_function
 from pysegcnn.core.graphics import plot_sample
 from pysegcnn.core.split import RandomSubset, SceneSubset

+# module level logger
+LOGGER = logging.getLogger(__name__)
+

 def get_scene_tiles(ds, scene_id):
@@ -33,7 +37,6 @@ def predict_samples(ds, model, optimizer, state_file, cm=False,
     # check whether the dataset is a valid subset, i.e.
     # an instance of pysegcnn.core.split.SceneSubset or
     # an instance of pysegcnn.core.split.RandomSubset
-    _name = type(ds).__name__
     if not isinstance(ds, RandomSubset) or not isinstance(ds, SceneSubset):
         raise TypeError('ds should be an instance of {} or of {}.'
                         .format(repr(RandomSubset), repr(SceneSubset)))
@@ -50,7 +53,7 @@ def predict_samples(ds, model, optimizer, state_file, cm=False,
     _ = model.load(state_file.name, optimizer, state_file.parent)

     # set the model to evaluation mode
-    print('Setting model to evaluation mode ...')
+    LOGGER.info('Setting model to evaluation mode ...')
     model.eval()
     model.to(device)
@@ -66,7 +69,7 @@ def predict_samples(ds, model, optimizer, state_file, cm=False,
     # iterate over the samples and plot inputs, ground truth and
     # model predictions
     output = {}
-    print('Predicting samples of the {} dataset ...'.format(ds.name))
+    LOGGER.info('Predicting samples of the {} dataset ...'.format(ds.name))
     for batch, (inputs, labels) in enumerate(dataloader):

         # send inputs and labels to device
@@ -80,9 +83,8 @@ def predict_samples(ds, model, optimizer, state_file, cm=False,
         # store output for current batch
         output[batch] = {'input': inputs, 'labels': labels, 'prediction': prd}

-        print('Sample: {:d}/{:d}, Accuracy: {:.2f}'
-              .format(batch + 1, len(dataloader),
-                      accuracy_function(prd, labels)))
+        LOGGER.info('Sample: {:d}/{:d}, Accuracy: {:.2f}'.format(
+            batch + 1, len(dataloader), accuracy_function(prd, labels)))

         # update confusion matrix
         if cm:
@@ -126,7 +128,7 @@ def predict_scenes(ds, model, optimizer, state_file,
     _ = model.load(state_file.name, optimizer, state_file.parent)

     # set the model to evaluation mode
-    print('Setting model to evaluation mode ...')
+    LOGGER.info('Setting model to evaluation mode ...')
     model.eval()
     model.to(device)
@@ -147,7 +149,7 @@ def predict_scenes(ds, model, optimizer, state_file,
     scene_size = (ds.dataset.height, ds.dataset.width)

     # iterate over the scenes
-    print('Predicting scenes of the {} dataset ...'.format(ds.name))
+    LOGGER.info('Predicting scenes of the {} dataset ...'.format(ds.name))
     scenes = {}
     for i, sid in enumerate(scene_ids):
@@ -187,7 +189,7 @@ def predict_scenes(ds, model, optimizer, state_file,
         prdtcn = reconstruct_scene(prd, scene_size, nbands=1)

         # print progress
-        print('Scene {:d}/{:d}, Id: {}, Accuracy: {:.2f}'.format(
+        LOGGER.info('Scene {:d}/{:d}, Id: {}, Accuracy: {:.2f}'.format(
             i + 1, len(scene_ids), sid, accuracy_function(prdtcn, labels)))

         # save outputs to dictionary
...
@@ -7,6 +7,7 @@ Created on Tue Jul 14 15:02:23 2020
 # builtins
 import os
 import re
+import logging
 import datetime

 # externals
@@ -17,6 +18,9 @@ import numpy as np
 # the following functions are utility functions for common image
 # manipulation operations

+# module level logger
+LOGGER = logging.getLogger(__name__)
+

 # this function reads an image to a numpy array
 def img2np(path, tile_size=None, tile=None, pad=False, cval=0, verbose=False):
@@ -36,7 +40,7 @@ def img2np(path, tile_size=None, tile=None, pad=False, cval=0, verbose=False):
         width = img.RasterXSize

     elif path is None:
-        print('Path is of NoneType, returning.')
+        LOGGER.warning('Path is of NoneType, returning.')
         return

     # accept numpy arrays as input
@@ -81,13 +85,12 @@ def img2np(path, tile_size=None, tile=None, pad=False, cval=0, verbose=False):
     y_size = height + padding[0] + padding[2]
     x_size = width + padding[1] + padding[3]

-    if verbose:
-        print('Image size: {}'.format((height, width)))
-        print('Dividing image into {} tiles of size {} ...'
-              .format(ntiles, (tile_size, tile_size)))
-        print('Padding image: bottom = {}, left = {}, top = {}, right = {}'
-              ' ...'.format(*list(padding)))
-        print('Padded image size: {}'.format((y_size, x_size)))
+    # print progress
+    LOGGER.debug('Image size: {}'.format((height, width)))
+    LOGGER.debug('Dividing image into {} tiles of size {} ...'
+                 .format(ntiles, (tile_size, tile_size)))
+    LOGGER.debug('Padding image (b, l, t, r): {}'.format(tuple(padding)))
+    LOGGER.debug('Padded image size: {}'.format((y_size, x_size)))

     # get the indices of the top left corner for each tile
     topleft = tile_topleft_corner((y_size, x_size), tile_size)
@@ -102,21 +105,18 @@ def img2np(path, tile_size=None, tile=None, pad=False, cval=0, verbose=False):
     # iterate over the topleft corners of the tiles
     for k, corner in topleft.items():
-        if verbose:
-            print('Creating tile {} with top-left corner {} ...'
-                  .format(k, corner))

         # in case a single tile is required, skip the rest of the tiles
         if tile is not None:
             if k != tile:
-                if verbose:
-                    print('Skipping tile {} ...'.format(k))
                 continue

             # set the key to 0 for correct array indexing when reading
             # a single tile from the image
-            if verbose:
-                print('Processing tile {} ...'.format(k))
+            LOGGER.debug('Processing tile {} ...'.format(k))
             k = 0
+        else:
+            LOGGER.debug('Creating tile {} with top-left corner {} ...'
+                         .format(k, corner))

         # calculate shift between padded and original image
         row = corner[0] - padding[2] if corner[0] > 0 else corner[0]
...
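
The former verbose prints in img2np are now emitted at DEBUG level, while both handlers in log_conf are set to INFO, so these messages are suppressed by default. Below is a rough sketch of how the configuration could be relaxed at runtime to surface them; the import path is an assumption and this adjustment is not part of the commit.

import logging
import logging.config

# hypothetical import path for the configuration helper
from pysegcnn.core.logging import log_conf

config = log_conf('debug.log')

# lower the root logger and its handlers to DEBUG so that the
# LOGGER.debug(...) calls in img2np become visible
config['loggers']['']['level'] = 'DEBUG'
for handler in config['handlers'].values():
    handler['level'] = 'DEBUG'

logging.config.dictConfig(config)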