Module monk.tf_keras_1.finetune.level_10_schedulers_main

Expand source code
from tf_keras_1.finetune.imports import *
from system.imports import *

from tf_keras_1.finetune.level_9_transforms_main import prototype_transforms


class prototype_schedulers(prototype_transforms):
    def __init__(self, verbose=1):
        super().__init__(verbose=verbose);


    ###############################################################################################################################################
    def lr_fixed(self):
        '''
        Set the learning rate to stay fixed (no scheduling)

        Args:
            None

        Returns:
            None
        '''
        self.system_dict = scheduler_fixed(self.system_dict);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1):
        '''
        Set learning rate to decrease in regular steps

        Args:
            step_size (int): Step interval for decreasing learning rate
            gamma (float): Multiplicative factor applied to the learning rate at every step
            last_epoch (int): Epoch after which the learning rate is no longer decreased

        Returns:
            None
        '''
        self.system_dict = scheduler_step(self.system_dict, step_size, gamma=gamma, last_epoch=last_epoch);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_exponential_decrease(self, gamma, last_epoch=-1):
        '''
        Set learning rate to decrease exponentially every step

        Args:
            gamma (float): Multiplicative factor applied to the learning rate at every step
            last_epoch (int): Epoch after which the learning rate is no longer decreased

        Returns:
            None
        '''
        self.system_dict = scheduler_exponential(self.system_dict, gamma, last_epoch=last_epoch);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################






    ###############################################################################################################################################
    def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, \
        threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08):
        '''
        Set learning rate to decrease if a metric (loss) stagnates in a plateau

        Args:
            mode (str): One of
                        - 'min' : learning rate is reduced when the monitored quantity (e.g. loss) has stopped decreasing;
                        - 'max' : learning rate is reduced when the monitored quantity (e.g. accuracy) has stopped increasing.
            factor (float): Multiplicative factor applied to the learning rate at each reduction
            patience (int): Number of epochs with no improvement to wait before reducing the learning rate
            verbose (bool): If True, scheduler progress and wait counts are printed
            threshold (float): Minimum change in the monitored quantity that counts as an improvement (default 0.0001)
            threshold_mode (str): How the threshold is interpreted, either 'rel' (relative) or 'abs' (absolute); default 'rel'
            cooldown (int): Number of epochs to wait after a reduction before resuming normal monitoring
            min_lr (float): Lower bound on the learning rate; it is not reduced below this value
            epsilon (float): A small value to avoid division by zero.

        Returns:
            None
        '''
        self.system_dict = scheduler_plateau(self.system_dict, mode=mode, factor=factor, patience=patience, verbose=verbose,
            threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, epsilon=epsilon);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################

Classes

class prototype_schedulers (verbose=1)

Main class for all learning rate schedulers in expert mode

Args

verbose : int
Set verbosity level: 0 - print nothing; 1 - print desired details
Expand source code
class prototype_schedulers(prototype_transforms):
    def __init__(self, verbose=1):
        super().__init__(verbose=verbose);


    ###############################################################################################################################################
    def lr_fixed(self):
        '''
        Set the learning rate to stay fixed (no scheduling)

        Args:
            None

        Returns:
            None
        '''
        self.system_dict = scheduler_fixed(self.system_dict);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1):
        '''
        Set learning rate to decrease in regular steps

        Args:
            step_size (int): Step interval for decreasing learning rate
            gamma (float): Multiplicative factor applied to the learning rate at every step
            last_epoch (int): Epoch after which the learning rate is no longer decreased

        Returns:
            None
        '''
        self.system_dict = scheduler_step(self.system_dict, step_size, gamma=gamma, last_epoch=last_epoch);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_exponential_decrease(self, gamma, last_epoch=-1):
        '''
        Set learning rate to decrease exponentially every step

        Args:
            gamma (float): Multiplicative factor applied to the learning rate at every step
            last_epoch (int): Epoch after which the learning rate is no longer decreased

        Returns:
            None
        '''
        self.system_dict = scheduler_exponential(self.system_dict, gamma, last_epoch=last_epoch);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################






    ###############################################################################################################################################
    def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, \
        threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08):
        '''
        Set learning rate to decrease if a metric (loss) stagnates in a plateau

        Args:
            mode (str): One of
                        - 'min' : learning rate is reduced when the monitored quantity (e.g. loss) has stopped decreasing;
                        - 'max' : learning rate is reduced when the monitored quantity (e.g. accuracy) has stopped increasing.
            factor (float): Multiplicative factor applied to the learning rate at each reduction
            patience (int): Number of epochs with no improvement to wait before reducing the learning rate
            verbose (bool): If True, scheduler progress and wait counts are printed
            threshold (float): Minimum change in the monitored quantity that counts as an improvement (default 0.0001)
            threshold_mode (str): How the threshold is interpreted, either 'rel' (relative) or 'abs' (absolute); default 'rel'
            cooldown (int): Number of epochs to wait after a reduction before resuming normal monitoring
            min_lr (float): Lower bound on the learning rate; it is not reduced below this value
            epsilon (float): A small value to avoid division by zero.

        Returns:
            None
        '''
        self.system_dict = scheduler_plateau(self.system_dict, mode=mode, factor=factor, patience=patience, verbose=verbose,
            threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, epsilon=epsilon);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");

Ancestors

  • tf_keras_1.finetune.level_9_transforms_main.prototype_transforms
  • tf_keras_1.finetune.level_8_layers_main.prototype_layers
  • tf_keras_1.finetune.level_7_aux_main.prototype_aux
  • tf_keras_1.finetune.level_6_params_main.prototype_params
  • tf_keras_1.finetune.level_5_state_base.finetune_state
  • tf_keras_1.finetune.level_4_evaluation_base.finetune_evaluation
  • tf_keras_1.finetune.level_3_training_base.finetune_training
  • tf_keras_1.finetune.level_2_model_base.finetune_model
  • tf_keras_1.finetune.level_1_dataset_base.finetune_dataset
  • system.base_class.system
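
A minimal usage sketch. In practice these setters are reached through Monk's top-level prototype class (which inherits from prototype_schedulers) after the dataset, model and optimizer have been configured; the direct instantiation below is purely illustrative and the numeric values are arbitrary.

gtf = prototype_schedulers(verbose=1)

# Each setter records its choice in
# gtf.system_dict["hyper-parameters"]["learning_rate_scheduler"]
# and prints the selected scheduler name and params.
gtf.lr_step_decrease(step_size=5, gamma=0.5)                     # halve the lr every 5 epochs
# gtf.lr_exponential_decrease(gamma=0.9)                         # or: multiply the lr by 0.9 each epoch
# gtf.lr_plateau_decrease(mode='min', factor=0.1, patience=3)    # or: reduce the lr when the loss plateaus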

Methods

def lr_exponential_decrease(self, gamma, last_epoch=-1)

Set learning rate to decrease exponentially every step

Args

gamma : float
Multiplicative factor applied to the learning rate at every step
last_epoch : int
Epoch after which the learning rate is no longer decreased

Returns

None
 
Expand source code
def lr_exponential_decrease(self, gamma, last_epoch=-1):
    '''
    Set learning rate to decrease exponentially every step

    Args:
        gamma (float): Multiplicative factor applied to the learning rate at every step
        last_epoch (int): Epoch after which the learning rate is no longer decreased

    Returns:
        None
    '''
    self.system_dict = scheduler_exponential(self.system_dict, gamma, last_epoch=last_epoch);

    self.custom_print("Learning rate scheduler");
    self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
    self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
    self.custom_print("");
def lr_fixed(self)

Set the learning rate to stay fixed (no scheduling)

Args

None
 

Returns

None
 
Expand source code
def lr_fixed(self):
    '''
    Set the learning rate to stay fixed (no scheduling)

    Args:
        None

    Returns:
        None
    '''
    self.system_dict = scheduler_fixed(self.system_dict);
    
    self.custom_print("Learning rate scheduler");
    self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
    self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
    self.custom_print("");
def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08)

Set learning rate to decrease if a metric (loss) stagnates in a plateau

Args

mode : str
One of 'min' (learning rate is reduced when the monitored quantity, e.g. loss, has stopped decreasing) or 'max' (learning rate is reduced when the monitored quantity, e.g. accuracy, has stopped increasing).
factor : float
Multiplicative factor applied to the learning rate at each reduction
patience : int
Number of epochs with no improvement to wait before reducing the learning rate
verbose : bool
If True, scheduler progress and wait counts are printed
threshold : float
Minimum change in the monitored quantity that counts as an improvement (default 0.0001)
threshold_mode : str
How the threshold is interpreted, either 'rel' (relative) or 'abs' (absolute); default 'rel'
cooldown : int
Number of epochs to wait after a reduction before resuming normal monitoring
min_lr : float
Lower bound on the learning rate; it is not reduced below this value
epsilon : float
A small value to avoid division by zero.

Returns

None
 
Expand source code
def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, \
    threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08):
    '''
    Set learning rate to decrease if a metric (loss) stagnates in a plateau

    Args:
        mode (str): One of
                    - 'min' : learning rate is reduced when the monitored quantity (e.g. loss) has stopped decreasing;
                    - 'max' : learning rate is reduced when the monitored quantity (e.g. accuracy) has stopped increasing.
        factor (float): Multiplicative factor applied to the learning rate at each reduction
        patience (int): Number of epochs with no improvement to wait before reducing the learning rate
        verbose (bool): If True, scheduler progress and wait counts are printed
        threshold (float): Minimum change in the monitored quantity that counts as an improvement (default 0.0001)
        threshold_mode (str): How the threshold is interpreted, either 'rel' (relative) or 'abs' (absolute); default 'rel'
        cooldown (int): Number of epochs to wait after a reduction before resuming normal monitoring
        min_lr (float): Lower bound on the learning rate; it is not reduced below this value
        epsilon (float): A small value to avoid division by zero.

    Returns:
        None
    '''
    self.system_dict = scheduler_plateau(self.system_dict, mode=mode, factor=factor, patience=patience, verbose=verbose,
        threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, epsilon=epsilon);

    self.custom_print("Learning rate scheduler");
    self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
    self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
    self.custom_print("");
def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1)

Set learning rate to decrease in regular steps

Args

step_size : int
Step interval for decreasing learning rate
gamma : float
Multiplicative factor applied to the learning rate at every step
last_epoch : int
Epoch after which the learning rate is no longer decreased

Returns

None
 
Expand source code
def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1):
    '''
    Set learning rate to decrease in regular steps

    Args:
        step_size (int): Step interval for decreasing learning rate
        gamma (float): Multiplicative factor applied to the learning rate at every step
        last_epoch (int): Epoch after which the learning rate is no longer decreased

    Returns:
        None
    '''
    self.system_dict = scheduler_step(self.system_dict, step_size, gamma=gamma, last_epoch=last_epoch);
    
    self.custom_print("Learning rate scheduler");
    self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
    self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
    self.custom_print("");