Module monk.tf_keras_1.training.callbacks

Custom Keras callbacks used during training with the tf_keras_1 backend: TimeHistory records the wall-clock time of each epoch, and MemoryHistory tracks peak GPU memory usage.

Expand source code
import time

from tf_keras_1.training.imports import *
from system.imports import *
# 'krc' (an alias of keras.callbacks) is provided by the wildcard imports above.


class TimeHistory(krc.Callback):
    '''Records the wall-clock duration of every training epoch.

    Args:
        log_dir (str): Optional directory (ending with a path separator) in which
            per-epoch times are appended to "times.txt".
    '''
    def __init__(self, log_dir=None):
        super().__init__()
        if log_dir:
            self.log_file = log_dir + "times.txt"
            self.f = open(self.log_file, 'a')
        else:
            self.log_file = None

    def on_train_begin(self, logs={}):
        self.times = []

    def on_train_end(self, logs={}):
        if self.log_file:
            self.f.close()

    def on_epoch_begin(self, epoch, logs={}):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, epoch, logs={}):
        # Measure the elapsed time once so the in-memory list and the log file agree.
        elapsed = time.time() - self.epoch_time_start
        self.times.append(elapsed)
        if self.log_file:
            self.f.write(str(elapsed) + "\n")


class MemoryHistory(krc.Callback):
    '''Tracks the peak GPU memory usage (in MB) observed at the end of any epoch.'''
    def __init__(self):
        super().__init__()
        self.max_gpu_usage = 0

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Imported lazily so that importing this module does not require GPUtil.
        import GPUtil
        GPUs = GPUtil.getGPUs()
        if len(GPUs) > 0:
            gpuMemoryUsed = GPUs[0].memoryUsed
            if self.max_gpu_usage < int(gpuMemoryUsed):
                self.max_gpu_usage = int(gpuMemoryUsed)
        return

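A minimal usage sketch of both callbacks, assuming a compiled Keras model `model`, training arrays `x_train`/`y_train`, and a hypothetical "logs/" directory, none of which are defined in this module:

from monk.tf_keras_1.training.callbacks import TimeHistory, MemoryHistory

time_cb = TimeHistory(log_dir="logs/")   # trailing slash matters: the file becomes "logs/times.txt"
mem_cb = MemoryHistory()

model.fit(x_train, y_train, epochs=5, callbacks=[time_cb, mem_cb])

print(time_cb.times)           # per-epoch durations in seconds
print(mem_cb.max_gpu_usage)    # peak GPU memory observed, in MB
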
Classes

class MemoryHistory

Tracks the peak GPU memory usage (in MB, queried through GPUtil) observed at the end of any training epoch.

Properties

params: dict. Training parameters
    (e.g. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
    Reference of the model being trained.

The logs dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch.

Currently, the .fit() method of the Sequential model class will include the following quantities in the logs that it passes to its callbacks:

on_epoch_end: logs include `acc` and `loss`, and
    optionally include `val_loss`
    (if validation is enabled in `fit`), and `val_acc`
    (if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
    the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
    (if accuracy monitoring is enabled).
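As an illustration of consuming these logs keys, a minimal hypothetical callback (not part of this module; it assumes TF-1.x-era Keras, where the training accuracy key is `acc`):

import keras

class LossPrinter(keras.callbacks.Callback):
    # Hypothetical example: prints the quantities described above after every epoch.
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print("epoch %d: loss=%.4f acc=%.4f"
              % (epoch, logs.get("loss", 0.0), logs.get("acc", 0.0)))
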
Expand source code
class MemoryHistory(krc.Callback):
    '''Tracks the peak GPU memory usage (in MB) observed at the end of any epoch.'''
    def __init__(self):
        super().__init__()
        self.max_gpu_usage = 0

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Imported lazily so that importing this module does not require GPUtil.
        import GPUtil
        GPUs = GPUtil.getGPUs()
        if len(GPUs) > 0:
            gpuMemoryUsed = GPUs[0].memoryUsed
            if self.max_gpu_usage < int(gpuMemoryUsed):
                self.max_gpu_usage = int(gpuMemoryUsed)
        return

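MemoryHistory relies on the GPUtil package to poll the GPU; a short sketch of the same query on its own (assumes the gputil package is installed and at least one NVIDIA GPU is visible):

import GPUtil

gpus = GPUtil.getGPUs()           # one entry per visible NVIDIA GPU
if gpus:
    print(gpus[0].memoryUsed)     # memory currently in use on GPU 0, in MB
else:
    print("no GPU detected")
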
Ancestors

  • keras.callbacks.Callback

Methods

def on_epoch_begin(self, epoch, logs={})

Called at the start of an epoch.

Subclasses should override for any actions to run. This function should only be called during train mode.

Arguments

epoch: integer, index of epoch.
logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_epoch_begin(self, epoch, logs={}):
    return
def on_epoch_end(self, epoch, logs={})

Called at the end of an epoch.

Subclasses should override for any actions to run. This function should only be called during train mode.

Arguments

epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
    validation epoch if validation is performed. Validation result keys
    are prefixed with `val_`.
Expand source code
def on_epoch_end(self, epoch, logs={}):
    # Imported lazily so that importing this module does not require GPUtil.
    import GPUtil
    GPUs = GPUtil.getGPUs()
    if len(GPUs) > 0:
        gpuMemoryUsed = GPUs[0].memoryUsed
        if self.max_gpu_usage < int(gpuMemoryUsed):
            self.max_gpu_usage = int(gpuMemoryUsed)
    return
def on_train_begin(self, logs={})

Called at the beginning of training.

Subclasses should override for any actions to run.

Arguments

logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_train_begin(self, logs={}):
    return
def on_train_end(self, logs={})

Called at the end of training.

Subclasses should override for any actions to run.

Arguments

logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_train_end(self, logs={}):
    return
class TimeHistory (log_dir=None)

Records the wall-clock duration of every training epoch and, if log_dir is given, appends each value to "times.txt" in that directory.

Properties

params: dict. Training parameters
    (e.g. verbosity, batch size, number of epochs...).
model: instance of `keras.models.Model`.
    Reference of the model being trained.

The logs dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch.

Currently, the .fit() method of the Sequential model class will include the following quantities in the logs that it passes to its callbacks:

on_epoch_end: logs include `acc` and `loss`, and
    optionally include `val_loss`
    (if validation is enabled in `fit`), and `val_acc`
    (if validation and accuracy monitoring are enabled).
on_batch_begin: logs include `size`,
    the number of samples in the current batch.
on_batch_end: logs include `loss`, and optionally `acc`
    (if accuracy monitoring is enabled).
Expand source code
class TimeHistory(krc.Callback):
    '''Records the wall-clock duration of every training epoch.

    Args:
        log_dir (str): Optional directory (ending with a path separator) in which
            per-epoch times are appended to "times.txt".
    '''
    def __init__(self, log_dir=None):
        super().__init__()
        if log_dir:
            self.log_file = log_dir + "times.txt"
            self.f = open(self.log_file, 'a')
        else:
            self.log_file = None

    def on_train_begin(self, logs={}):
        self.times = []

    def on_train_end(self, logs={}):
        if self.log_file:
            self.f.close()

    def on_epoch_begin(self, epoch, logs={}):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, epoch, logs={}):
        # Measure the elapsed time once so the in-memory list and the log file agree.
        elapsed = time.time() - self.epoch_time_start
        self.times.append(elapsed)
        if self.log_file:
            self.f.write(str(elapsed) + "\n")

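If log_dir was supplied, the recorded durations can also be read back from the log file after training; a minimal sketch assuming the hypothetical path "logs/times.txt":

with open("logs/times.txt") as f:            # one duration per line, in seconds
    epoch_times = [float(line) for line in f]

print(sum(epoch_times) / len(epoch_times))   # mean seconds per epoch
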
Ancestors

  • keras.callbacks.Callback

Methods

def on_epoch_begin(self, epoch, logs={})

Called at the start of an epoch.

Subclasses should override for any actions to run. This function should only be called during train mode.

Arguments

epoch: integer, index of epoch.
logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_epoch_begin(self, epoch, logs={}):
    self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={})

Called at the end of an epoch.

Subclasses should override for any actions to run. This function should only be called during train mode.

Arguments

epoch: integer, index of epoch.
logs: dict, metric results for this training epoch, and for the
    validation epoch if validation is performed. Validation result keys
    are prefixed with `val_`.
Expand source code
def on_epoch_end(self, epoch, logs={}):
    # Measure the elapsed time once so the in-memory list and the log file agree.
    elapsed = time.time() - self.epoch_time_start
    self.times.append(elapsed)
    if self.log_file:
        self.f.write(str(elapsed) + "\n")
def on_train_begin(self, logs={})

Called at the beginning of training.

Subclasses should override for any actions to run.

Arguments

logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_train_begin(self, logs={}):
    self.times = []
def on_train_end(self, logs={})

Called at the end of training.

Subclasses should override for any actions to run.

Arguments

logs: dict, currently no data is passed to this argument for this method
    but that may change in the future.
Expand source code
def on_train_end(self, logs={}):
    if self.log_file:
        self.f.close()