Module monk.tf_keras_1.schedulers.common

from tf_keras_1.schedulers.imports import *
from system.imports import *



# Code inspired from: https://www.pyimagesearch.com/2019/07/22/keras-learning-rate-schedules-and-decay/

#Level - 1 - Helper classes - Keras
class LearningRateDecay:
    def plot(self, epochs, title="Learning Rate Schedule"):
        # compute the set of learning rates for each corresponding
        # epoch
        lrs = [self(i) for i in epochs]
 
        # plot the learning rate schedule
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(epochs, lrs)
        plt.title(title)
        plt.xlabel("Epoch #")
        plt.ylabel("Learning Rate")
        

class StepDecay(LearningRateDecay):
    def __init__(self, initAlpha=0.01, factor=0.25, dropEvery=10):
        # store the base initial learning rate, drop factor, and
        # epochs to drop every
        self.initAlpha = initAlpha
        self.factor = factor
        self.dropEvery = dropEvery
 
    def __call__(self, epoch):
        # compute the learning rate for the current epoch
        exp = np.floor((1 + epoch) / self.dropEvery)
        alpha = self.initAlpha * (self.factor ** exp)
 
        # return the learning rate
        print("New Lr: ", float(alpha))
        return float(alpha)


class PolynomialDecay(LearningRateDecay):
    def __init__(self, maxEpochs=100, initAlpha=0.01, power=1.0):
        # store the maximum number of epochs, base learning rate,
        # and power of the polynomial
        self.maxEpochs = maxEpochs
        self.initAlpha = initAlpha
        self.power = power
 
    def __call__(self, epoch):
        # compute the new learning rate based on polynomial decay
        decay = (1 - (epoch / float(self.maxEpochs))) ** self.power
        alpha = self.initAlpha * decay
 
        # return the learning rate
        print("New Lr: ", float(alpha))
        return float(alpha)
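
Both schedulers are plain callables that map an epoch index to a learning rate, so they can be wired into a Keras training run through the LearningRateScheduler callback. A minimal sketch, assuming a standard Keras install and an already compiled model (model, X_train and y_train are placeholders, not part of this module):

from keras.callbacks import LearningRateScheduler

# StepDecay / PolynomialDecay instances are callable as schedule(epoch) -> lr,
# which is the signature LearningRateScheduler expects.
schedule = StepDecay(initAlpha=0.01, factor=0.25, dropEvery=10)
callbacks = [LearningRateScheduler(schedule)]

# model.fit(X_train, y_train, epochs=50, callbacks=callbacks)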

Classes

class LearningRateDecay (*args, **kwargs)

Base class for the learning-rate schedules below. Subclasses implement __call__(epoch) to return the learning rate for a given epoch and inherit plot() for visualising the schedule.

Subclasses

StepDecay
PolynomialDecay

Methods

def plot(self, epochs, title='Learning Rate Schedule')

Evaluates the schedule at every epoch index in epochs and plots the resulting learning-rate curve with matplotlib (epoch number on the x-axis, learning rate on the y-axis).
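A small usage sketch, assuming matplotlib is installed (plt.show() is only needed outside notebook environments):

import matplotlib.pyplot as plt

epochs = list(range(0, 100))
sched = PolynomialDecay(maxEpochs=100, initAlpha=0.01, power=2.0)
sched.plot(epochs, title="Polynomial Decay (power=2)")
plt.show()
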
class PolynomialDecay (maxEpochs=100, initAlpha=0.01, power=1.0)

Decays the learning rate polynomially from initAlpha down to zero at maxEpochs; power=1.0 gives a linear decay.

Ancestors

LearningRateDecay
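A quick hand check of the formula alpha = initAlpha * (1 - epoch / maxEpochs) ** power with the defaults above (power=1.0 is linear decay):

sched = PolynomialDecay(maxEpochs=100, initAlpha=0.01, power=1.0)
sched(0)    # 0.01   -> full initial rate at epoch 0
sched(50)   # 0.005  -> halfway through training, half the rate
sched(100)  # 0.0    -> the rate reaches zero at maxEpochs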

class StepDecay (initAlpha=0.01, factor=0.25, dropEvery=10)

Drops the learning rate by a constant factor every dropEvery epochs.

Ancestors

LearningRateDecay
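A quick hand check of the formula alpha = initAlpha * factor ** floor((1 + epoch) / dropEvery) with the defaults above; note that the (1 + epoch) term means the first drop lands at epoch 9, not epoch 10:

sched = StepDecay(initAlpha=0.01, factor=0.25, dropEvery=10)
sched(0)    # 0.01      -> floor(1/10)  = 0 drops applied
sched(9)    # 0.0025    -> floor(10/10) = 1 drop applied
sched(19)   # 0.000625  -> floor(20/10) = 2 drops applied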