Module monk.pytorch.finetune.level_10_schedulers_main

Expand source code
from pytorch.finetune.imports import *
from system.imports import *

from pytorch.finetune.level_9_transforms_main import prototype_transforms


class prototype_schedulers(prototype_transforms):
    def __init__(self, verbose=1):
        super().__init__(verbose=verbose);


    ###############################################################################################################################################
    def lr_fixed(self):
        '''
        Keep the learning rate fixed throughout training

        Args:
            None

        Returns:
            None
        '''
        self.system_dict = scheduler_fixed(self.system_dict);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1):
        '''
        Set learning rate to decrease in regular steps

        Args:
            step_size (int): Number of epochs between successive learning rate reductions
            gamma (float): Multiplicative factor applied to the learning rate at every step
            last_epoch (int): Index of the last epoch when resuming training; -1 starts the schedule from scratch

        Returns:
            None
        '''
        self.system_dict = scheduler_step(self.system_dict, step_size, gamma=gamma, last_epoch=last_epoch);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################




    ###############################################################################################################################################
    def lr_multistep_decrease(self, milestones, gamma=0.1, last_epoch=-1):
        '''
        Set learning rate to decrease at specified milestone epochs

        Args:
            milestones (list): List of epochs at which learning rate is to be decreased
            gamma (float): Multiplicative factor applied to the learning rate at every milestone
            last_epoch (int): Index of the last epoch when resuming training; -1 starts the schedule from scratch

        Returns:
            None
        '''
        self.system_dict = scheduler_multistep(self.system_dict, milestones, gamma=gamma, last_epoch=last_epoch);
        
        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################





    ###############################################################################################################################################
    def lr_exponential_decrease(self, gamma, last_epoch=-1):
        '''
        Set learning rate to decay exponentially every epoch

        Args:
            gamma (float): Multiplicative decay factor applied to the learning rate every epoch
            last_epoch (int): Index of the last epoch when resuming training; -1 starts the schedule from scratch

        Returns:
            None
        '''
        self.system_dict = scheduler_exponential(self.system_dict, gamma, last_epoch=last_epoch);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################






    ###############################################################################################################################################
    def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, \
        threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08):
        '''
        Set learning rate to decrease when a monitored metric (e.g., validation loss) plateaus

        Args:
            mode (str): Either of 
                        - 'min' : lr will be reduced when the quantity monitored (loss) has stopped decreasing; 
                        - 'max' : lr reduced when the quantity monitored (accuracy) has stopped increasing. 
            factor (float): Multiplicative factor by which the learning rate is reduced
            patience (int): Number of epochs with no improvement after which the learning rate is reduced
            verbose (bool): If True, prints a message each time the learning rate is reduced
            threshold (float): Threshold for measuring a new optimum; changes smaller than this are ignored
            threshold_mode (str): Either 'rel' (relative change) or 'abs' (absolute change) for interpreting threshold
            cooldown (int): Number of epochs to wait after a reduction before resuming normal operation
            min_lr (float): Lower bound below which the learning rate will not be decreased
            epsilon (float): Minimal change in learning rate; updates smaller than this are ignored

        Returns:
            None
        '''
        self.system_dict = scheduler_plateau(self.system_dict, mode=mode, factor=factor, patience=patience, verbose=verbose,
            threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr, epsilon=epsilon);

        self.custom_print("Learning rate scheduler");
        self.custom_print("    Name:   {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["name"]));
        self.custom_print("    Params: {}".format(self.system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]));
        self.custom_print("");
    ###############################################################################################################################################

Classes

class prototype_schedulers (verbose=1)

Main class for all learning rate schedulers in expert mode

Args

verbose : int
Set verbosity level: 0 - print nothing; 1 - print desired details
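
These scheduler setters are normally reached through Monk's top-level prototype object rather than by instantiating this class directly. Below is a minimal, hedged sketch of the expert-mode flow; the import path and the setup calls (prototype, Prototype, and the dataset/model configuration in between) are assumptions based on the usual Monk workflow and may differ in your installation.

from monk.pytorch_prototype import prototype    # assumed import path

gtf = prototype(verbose=1)
gtf.Prototype("my-project", "experiment-1")     # assumed project/experiment setup
# ... dataset, model and training parameters are configured here in expert mode ...

# Pick exactly one learning rate scheduler (methods documented below)
gtf.lr_step_decrease(step_size=5, gamma=0.1)    # multiply the learning rate by 0.1 every 5 epochs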


Ancestors

  • pytorch.finetune.level_9_transforms_main.prototype_transforms
  • pytorch.finetune.level_8_layers_main.prototype_layers
  • pytorch.finetune.level_7_aux_main.prototype_aux
  • pytorch.finetune.level_6_params_main.prototype_params
  • pytorch.finetune.level_5_state_base.finetune_state
  • pytorch.finetune.level_4_evaluation_base.finetune_evaluation
  • pytorch.finetune.level_3_training_base.finetune_training
  • pytorch.finetune.level_2_model_base.finetune_model
  • pytorch.finetune.level_1_dataset_base.finetune_dataset
  • system.base_class.system

Methods

def lr_exponential_decrease(self, gamma, last_epoch=-1)

Set learning rate to decay exponentially every epoch

Args

gamma : float
Multiplicative decay factor applied to the learning rate every epoch
last_epoch : int
Index of the last epoch when resuming training; -1 starts the schedule from scratch

Returns

None
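
The signature mirrors torch.optim.lr_scheduler.ExponentialLR, which is presumably what this wrapper configures under the hood. For reference, an equivalent standalone PyTorch sketch (model and optimizer are placeholders):

from torch import nn, optim

model = nn.Linear(10, 2)                               # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

for epoch in range(5):
    # ... one epoch of training ...
    optimizer.step()
    scheduler.step()                                   # lr becomes 0.1 * 0.9 ** (epoch + 1)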
 
def lr_fixed(self)

Keep the learning rate fixed throughout training

Args

None
 

Returns

None
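
A hedged usage sketch on a Monk prototype object gtf (set up as in the class-level example above); no scheduler-driven decay is applied, so the optimizer's base learning rate is kept for the whole run:

gtf.lr_fixed()   # keep the base learning rate constant across all epochs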
 
def lr_multistep_decrease(self, milestones, gamma=0.1, last_epoch=-1)

Set learning rate to decrease at specified milestone epochs

Args

milestones : list
List of epochs at which learning rate is to be decreased
gamma : float
Multiplicative factor applied to the learning rate at every milestone
last_epoch : int
Index of the last epoch when resuming training; -1 starts the schedule from scratch

Returns

None
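
The parameters mirror torch.optim.lr_scheduler.MultiStepLR, presumably the underlying scheduler. A short standalone PyTorch sketch with a placeholder optimizer:

from torch import nn, optim

optimizer = optim.SGD(nn.Linear(10, 2).parameters(), lr=0.1)   # placeholder model/optimizer
# multiply the learning rate by 0.1 when the epoch count reaches 10, and again at 20
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 20], gamma=0.1)
# call scheduler.step() once per epoch, after optimizer.step()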
 
def lr_plateau_decrease(self, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, epsilon=1e-08)

Set learning rate to decrease when a monitored metric (e.g., validation loss) plateaus

Args

mode : str
Either 'min' (reduce the learning rate when the monitored quantity, e.g. loss, has stopped decreasing) or 'max' (reduce it when the monitored quantity, e.g. accuracy, has stopped increasing)
factor : float
Multiplicative factor by which the learning rate is reduced
patience : int
Number of epochs with no improvement after which the learning rate is reduced
verbose : bool
If True, prints a message each time the learning rate is reduced
threshold : float
Threshold for measuring a new optimum; changes smaller than this are ignored
threshold_mode : str
Either 'rel' (relative change) or 'abs' (absolute change) for interpreting threshold
cooldown : int
Number of epochs to wait after a reduction before resuming normal operation
min_lr : float
Lower bound below which the learning rate will not be decreased
epsilon : float
Minimal change in learning rate; updates smaller than this are ignored

Returns

None
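
The parameters mirror torch.optim.lr_scheduler.ReduceLROnPlateau (with epsilon presumably mapped to its eps argument). Unlike the epoch-indexed schedulers, this one must be stepped with the monitored metric; a standalone PyTorch sketch with placeholders:

from torch import nn, optim

model = nn.Linear(10, 2)                               # placeholder model
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='min', factor=0.1, patience=10,
    threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)

for epoch in range(50):
    # ... one epoch of training and validation ...
    val_loss = 0.5                                     # placeholder validation loss
    scheduler.step(val_loss)                           # lr drops after `patience` epochs without improvement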
 
def lr_step_decrease(self, step_size, gamma=0.1, last_epoch=-1)

Set learning rate to decrease in regular steps

Args

step_size : int
Number of epochs between successive learning rate reductions
gamma : float
Multiplicative factor applied to the learning rate at every step
last_epoch : int
Index of the last epoch when resuming training; -1 starts the schedule from scratch

Returns

None
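
The parameters mirror torch.optim.lr_scheduler.StepLR, presumably the underlying scheduler. A short standalone PyTorch sketch with a placeholder optimizer:

from torch import nn, optim

optimizer = optim.SGD(nn.Linear(10, 2).parameters(), lr=0.1)   # placeholder model/optimizer
# multiply the learning rate by 0.1 every 5 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
# call scheduler.step() once per epoch, after optimizer.step()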
 