Mirror of https://forge.apps.education.fr/phroy/mes-scripts-de-ml.git (synced 2024-01-27 11:30:36 +01:00)
Fundamentals: mini-batch gradient descent
This commit is contained in:
parent 3d87eeea70 · commit 37ac892b31
@@ -30,11 +30,19 @@ import time, math
 t_debut = time.time()
 
 # Init des plots
-fig = plt.figure(figsize=(10, 5))
+fig = plt.figure(figsize=(15, 5))
 fig.suptitle("Descente de gradient")
-donnees_ax = fig.add_subplot(131)
-model_ax = fig.add_subplot(132)
-couts_ax = fig.add_subplot(133)
+donnees_ax = fig.add_subplot(141) # Observations : x1 et cibles : y
+model_ax = fig.add_subplot(142) # Modèle : theta0, theta1
+couts_ax = fig.add_subplot(143) # Coûts : RMSE, MSE, ...
+app_ax = fig.add_subplot(144) # Taux d'appentissage : eta
+
+i_list=[] # Itération
+couts_2d=[]
+couts_delta=[]
+couts_mse=[] # MSE
+couts_rmse=[] # RMSE
+eta_list=[] # Taux d'apprentissage
 
 ###############################################################################
 # Observations
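The later hunks use X, y, m and exact_solution without showing where they are defined (the "# Observations" section sits outside this diff). A minimal sketch of how such observations and the closed-form reference solution can be set up; the sample count and the coefficients 4 and 3 are illustrative assumptions, not values taken from the repository:

import numpy as np

m = 100                                  # number of observations (assumed)
x1 = 2 * np.random.rand(m, 1)            # single feature
y = 4 + 3 * x1 + np.random.randn(m, 1)   # noisy linear targets (illustrative coefficients)

X = np.c_[np.ones((m, 1)), x1]           # design matrix with x0 = 1 for the bias term

# Closed-form least-squares fit (normal equation); the "vecteur 2D" cost in the
# training loops measures the distance of theta to this point.
exact_solution = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)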
@@ -75,22 +83,17 @@ def rmse(theta):
 theta= np.random.randn(2,1)
 theta0=[theta[0]]
 theta1=[theta[1]]
-couts_i=[]
-couts_2d=[]
-couts_delta=[]
-delta = 0
-couts_mse=[] # MSE
-couts_rmse=[] # RMSE
 
 # Descente du gradient
 for i in range(n):
+    i_list.append(i)
 
     # Calcul du gradient du pas
     gradients = 2/m * X.T.dot(X.dot(theta) - y)
+    eta_list.append(eta)
    theta = theta - eta * gradients
     theta0.append(theta[0])
     theta1.append(theta[1])
-    couts_i.append(i)
 
     # Calcul de l'erreur avec la norme du vecteur 2D (Objectif -> Theta) dans le plan (theta0, theta1)
     couts_2d.append(math.sqrt((theta[0]-exact_solution[0])**2+(theta[1]-exact_solution[1])**2))
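This hunk is the full-batch update: the MSE gradient over all m observations is 2/m * X.T.dot(X.dot(theta) - y), and theta takes one step of size eta against it; the new i_list and eta_list only record the iteration number and the (here constant) learning rate for the extra plot panel. A self-contained sketch of the same loop, where eta, n and the data are assumed values rather than the script's own:

import numpy as np

m = 100
X = np.c_[np.ones((m, 1)), 2 * np.random.rand(m, 1)]
y = 4 + 3 * X[:, 1:2] + np.random.randn(m, 1)

eta = 0.1                       # fixed learning rate (assumed value)
n = 1000                        # number of iterations (assumed value)
theta = np.random.randn(2, 1)

for i in range(n):
    gradients = 2 / m * X.T.dot(X.dot(theta) - y)   # MSE gradient over the whole batch
    theta = theta - eta * gradients

print(theta)                    # approaches the least-squares solution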
@@ -131,16 +134,23 @@ model_ax.set_xlabel(r'$\theta_0$')
 model_ax.set_ylabel(r'$\theta_1 $', rotation=0)
 model_ax.legend()
 
-# Plot du cout
+# Plot du coût
 couts_ax.set_title("Coûts")
-couts_ax.plot(couts_i, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
-couts_ax.plot(couts_i, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
-couts_ax.plot(couts_i, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
-couts_ax.plot(couts_i, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
+couts_ax.plot(i_list, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
+couts_ax.plot(i_list, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
+couts_ax.plot(i_list, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
+couts_ax.plot(i_list, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
 couts_ax.set_xlabel(r'$i$')
 couts_ax.set_ylabel("Coûts")
 couts_ax.legend()
+
+# Plot du taux d'appentissage
+app_ax.set_title("Taux d'appentissage")
+app_ax.plot(i_list, eta_list, '.', ls=':', color='b', fillstyle='none', label="Taux d'appentissage", markevery=10)
+app_ax.set_xlabel(r'$i$')
+app_ax.set_ylabel(r'$\eta$', rotation=0)
+# app_ax.legend()
 
 plt.show()
 
 # Performances
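The third panel overlays several cost curves: couts_mse and couts_rmse are data-space errors (RMSE is the square root of MSE, computed by the rmse(theta) helper named in the hunk header), while couts_2d is the Euclidean distance between theta and exact_solution in the (theta0, theta1) plane. A short sketch of these quantities for one theta, with the data setup assumed as in the earlier sketch:

import numpy as np

m = 100
X = np.c_[np.ones((m, 1)), 2 * np.random.rand(m, 1)]
y = 4 + 3 * X[:, 1:2] + np.random.randn(m, 1)
exact_solution = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)

theta = np.random.randn(2, 1)

mse = float(np.mean((X.dot(theta) - y) ** 2))            # mean squared error in data space
rmse = mse ** 0.5                                        # RMSE is its square root
cout_2d = float(np.linalg.norm(theta - exact_solution))  # distance in parameter space,
                                                         # same value as the math.sqrt(...) line in the loop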
@@ -30,11 +30,19 @@ import time, math
 t_debut = time.time()
 
 # Init des plots
-fig = plt.figure(figsize=(10, 5))
+fig = plt.figure(figsize=(15, 5))
 fig.suptitle("Descente de gradient stochastique")
-donnees_ax = fig.add_subplot(131)
-model_ax = fig.add_subplot(132)
-couts_ax = fig.add_subplot(133)
+donnees_ax = fig.add_subplot(141) # Observations : x1 et cibles : y
+model_ax = fig.add_subplot(142) # Modèle : theta0, theta1
+couts_ax = fig.add_subplot(143) # Coûts : RMSE, MSE, ...
+app_ax = fig.add_subplot(144) # Taux d'appentissage : eta
+
+i_list=[] # Itération
+couts_2d=[]
+couts_delta=[]
+couts_mse=[] # MSE
+couts_rmse=[] # RMSE
+eta_list=[] # Taux d'apprentissage
 
 ###############################################################################
 # Observations
@@ -80,16 +88,11 @@ def rmse(theta):
 theta= np.random.randn(2,1)
 theta0=[theta[0]]
 theta1=[theta[1]]
-couts_i=[]
-couts_2d=[]
-couts_delta=[]
-delta = 0
-couts_mse=[] # MSE
-couts_rmse=[] # RMSE
 
 # Descente du gradient
 for epoq in range (n_epoq):
     for i in range(m):
+        i_list.append(epoq * m + i)
 
         # Calcul du gradient du pas
         idx = np.random.randint(m) # Index aléatoire
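In the stochastic script one observation is drawn at random per step. Slicing with idx : idx+1 (rather than X[idx]) keeps xi and yi two-dimensional, so the same matrix formula applies with the batch size of 1 that the 2/1 factor reflects. A small sketch of that step in isolation, data assumed as before:

import numpy as np

m = 100
X = np.c_[np.ones((m, 1)), 2 * np.random.rand(m, 1)]
y = 4 + 3 * X[:, 1:2] + np.random.randn(m, 1)
theta = np.random.randn(2, 1)

idx = np.random.randint(m)       # pick one observation at random
xi = X[idx:idx + 1]              # shape (1, 2), not (2,)
yi = y[idx:idx + 1]              # shape (1, 1)

gradients = 2 / 1 * xi.T.dot(xi.dot(theta) - yi)   # same formula, batch of size 1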
@@ -97,10 +100,10 @@ for epoq in range (n_epoq):
         yi = y[idx : idx+1]
         gradients = 2/1 * xi.T.dot(xi.dot(theta) - yi)
         eta = ech_app (epoq * m + i)
+        eta_list.append(eta)
         theta = theta - eta * gradients
         theta0.append(theta[0])
         theta1.append(theta[1])
-        couts_i.append(epoq * m + i)
 
         # Calcul de l'erreur avec la norme du vecteur 2D (Objectif -> Theta) dans le plan (theta0, theta1)
         couts_2d.append(math.sqrt((theta[0]-exact_solution[0])**2+(theta[1]-exact_solution[1])**2))
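Compared with the batch version, eta is no longer constant: ech_app(epoq * m + i) decays the learning rate with the global step number, and eta_list records each value for the new fourth panel. A compact, self-contained sketch of the whole stochastic loop; n_epoq, t0 and t1 are assumed values, not necessarily those of this script:

import numpy as np

m = 100
X = np.c_[np.ones((m, 1)), 2 * np.random.rand(m, 1)]
y = 4 + 3 * X[:, 1:2] + np.random.randn(m, 1)

n_epoq = 50                          # number of epochs (assumed)
t0, t1 = 5, 50                       # schedule factors (assumed)

def ech_app(t):
    return t0 / (t + t1)             # learning rate shrinks as training progresses

theta = np.random.randn(2, 1)
for epoq in range(n_epoq):
    for i in range(m):
        idx = np.random.randint(m)
        xi, yi = X[idx:idx + 1], y[idx:idx + 1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = ech_app(epoq * m + i)  # global step number drives the schedule
        theta = theta - eta * gradients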
@@ -143,15 +146,22 @@ model_ax.legend()
 
 # Plot du cout
 couts_ax.set_title("Coûts")
-couts_ax.plot(couts_i, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
-couts_ax.plot(couts_i, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
-couts_ax.plot(couts_i, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
-couts_ax.plot(couts_i, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
+couts_ax.plot(i_list, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
+couts_ax.plot(i_list, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
+couts_ax.plot(i_list, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
+couts_ax.plot(i_list, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
 # couts_ax.plot(couts_i, couts_rmse, color='g', label="Coûts RMSE")
 couts_ax.set_xlabel(r'$i$')
 couts_ax.set_ylabel("Coûts")
 couts_ax.legend()
+
+# Plot du taux d'appentissage
+app_ax.set_title("Taux d'appentissage")
+app_ax.plot(i_list, eta_list, '.', ls=':', color='b', fillstyle='none', label="Taux d'appentissage", markevery=10)
+app_ax.set_xlabel(r'$i$')
+app_ax.set_ylabel(r'$\eta$', rotation=0)
+# app_ax.legend()
 
 plt.show()
 
 # Performances
@@ -30,11 +30,19 @@ import time, math
 t_debut = time.time()
 
 # Init des plots
-fig = plt.figure(figsize=(10, 5))
+fig = plt.figure(figsize=(15, 5))
 fig.suptitle("Descente de gradient par mini-lots")
-donnees_ax = fig.add_subplot(131)
-model_ax = fig.add_subplot(132)
-couts_ax = fig.add_subplot(133)
+donnees_ax = fig.add_subplot(141) # Observations : x1 et cibles : y
+model_ax = fig.add_subplot(142) # Modèle : theta0, theta1
+couts_ax = fig.add_subplot(143) # Coûts : RMSE, MSE, ...
+app_ax = fig.add_subplot(144) # Taux d'appentissage : eta
+
+i_list=[] # Itération
+couts_2d=[]
+couts_delta=[]
+couts_mse=[] # MSE
+couts_rmse=[] # RMSE
+eta_list=[] # Taux d'apprentissage
 
 ###############################################################################
 # Observations
@@ -63,9 +71,32 @@ X_new = np.c_[np.ones((2, 1)), x1_new] # Matrice des observations, avec x0=1
 # - eta : taux d'appentissage ici dégressif par échéancier d'apprentissage (ech_app)
 
 # n_epoq = 50 # Nombre d'époques
-n_epoq = 2 # Nombre d'époques (hyperparamètre)
+n_epoq = 20 # Nombre d'époques (hyperparamètre)
+lot_taille = 20 # Taille d'un mini-lot (hyperparamètre)
+
+
+# def mini_batch_gradient_descent():
+#     n_iterations = 50
+#     minibatch_size = 20
+#     t0, t1 = 200, 1000
+#     thetas = np.random.randn(2, 1)
+#     thetas_path = [thetas]
+#     t = 0
+#     for epoch in range(n_iterations):
+#         shuffled_indices = np.random.permutation(m)
+#         X_b_shuffled = X_b[shuffled_indices]
+#         y_shuffled = y[shuffled_indices]
+#         for i in range(0, m, minibatch_size):
+#             t += 1
+#             xi = X_b_shuffled[i:i+minibatch_size]
+#             yi = y_shuffled[i:i+minibatch_size]
+#             gradients = 2*xi.T.dot(xi.dot(thetas) - yi)/minibatch_size
+#             eta = learning_schedule(t, t0, t1)
+#             thetas = thetas - eta*gradients
+#             thetas_path.append(thetas)
+
 # Rédéfinition du taux d'apprentissage à partir de l'échéancier d'apprentissage
 # t0, t1 = 200, 1000
 t0, t1 = 5, 50 # Facteurs de l'échéancier d'apprentissage (hyperparamètres)
 def ech_app (t):
     return t0 / (t + t1)
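The active hyperparameters are now n_epoq = 20 and lot_taille = 20, and the commented-out mini_batch_gradient_descent block is a reference version of the same algorithm that the loop below implements. With t0, t1 = 5, 50 the schedule starts at eta = 0.1 and decays toward zero, which this small check illustrates:

t0, t1 = 5, 50          # schedule factors shown in the diff

def ech_app(t):
    return t0 / (t + t1)

print(ech_app(0))       # 0.1  : initial learning rate
print(ech_app(50))      # 0.05 : halved after 50 mini-batch steps
print(ech_app(450))     # 0.01 : keeps shrinking toward 0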
@@ -80,27 +111,27 @@ def rmse(theta):
 theta= np.random.randn(2,1)
 theta0=[theta[0]]
 theta1=[theta[1]]
-couts_i=[]
-couts_2d=[]
-couts_delta=[]
-delta = 0
-couts_mse=[] # MSE
-couts_rmse=[] # RMSE
 
 # Descente du gradient
 for epoq in range (n_epoq):
-    for i in range(m):
+
+    # Mélange des observations
+    indices_melange = np.random.permutation(m)
+    X_melange = X[indices_melange]
+    y_melange = y[indices_melange]
+
+    for i in range(0, m, lot_taille):
+        i_list.append(epoq * (m/lot_taille) + i/lot_taille)
 
         # Calcul du gradient du pas
-        idx = np.random.randint(m) # Index aléatoire
-        xi = X[idx : idx+1]
-        yi = y[idx : idx+1]
-        gradients = 2/1 * xi.T.dot(xi.dot(theta) - yi)
-        eta = ech_app (epoq * m + i)
-        theta = theta - eta * gradients
+        xi = X_melange[i:i+lot_taille]
+        yi = y_melange[i:i+lot_taille]
+        gradients = 2*xi.T.dot(xi.dot(theta) - yi)/lot_taille
+        eta = ech_app (epoq * (m/lot_taille) + i/lot_taille)
+        eta_list.append(eta)
+        theta = theta - eta*gradients
         theta0.append(theta[0])
         theta1.append(theta[1])
-        couts_i.append(epoq * m + i)
 
         # Calcul de l'erreur avec la norme du vecteur 2D (Objectif -> Theta) dans le plan (theta0, theta1)
         couts_2d.append(math.sqrt((theta[0]-exact_solution[0])**2+(theta[1]-exact_solution[1])**2))
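Each epoch now reshuffles the observations, walks through them in slices of lot_taille, and averages the gradient over the slice; the schedule argument epoq * (m/lot_taille) + i/lot_taille is the running mini-batch counter, since i advances by lot_taille inside an epoch. A self-contained sketch of that loop, with the data and hyperparameters assumed as in the earlier sketches:

import numpy as np

m = 100
X = np.c_[np.ones((m, 1)), 2 * np.random.rand(m, 1)]
y = 4 + 3 * X[:, 1:2] + np.random.randn(m, 1)

n_epoq, lot_taille = 20, 20
t0, t1 = 5, 50

def ech_app(t):
    return t0 / (t + t1)

theta = np.random.randn(2, 1)
for epoq in range(n_epoq):
    indices_melange = np.random.permutation(m)      # reshuffle once per epoch
    X_melange = X[indices_melange]
    y_melange = y[indices_melange]

    for i in range(0, m, lot_taille):               # i jumps one mini-batch at a time
        xi = X_melange[i:i + lot_taille]
        yi = y_melange[i:i + lot_taille]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi) / lot_taille
        eta = ech_app(epoq * (m / lot_taille) + i / lot_taille)
        theta = theta - eta * gradients

Averaging over lot_taille observations gives a noisier gradient than the full batch but a much smoother one than the single-sample stochastic step, which is the usual trade-off mini-batches aim for.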
@@ -141,17 +172,23 @@ model_ax.set_xlabel(r'$\theta_0$')
 model_ax.set_ylabel(r'$\theta_1 $', rotation=0)
 model_ax.legend()
 
-# Plot du cout
+# Plot du coût
 couts_ax.set_title("Coûts")
-couts_ax.plot(couts_i, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
-couts_ax.plot(couts_i, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
-couts_ax.plot(couts_i, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
-couts_ax.plot(couts_i, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
-# couts_ax.plot(couts_i, couts_rmse, color='g', label="Coûts RMSE")
+couts_ax.plot(i_list, couts_2d, '.', ls=':', color='c', fillstyle='none', label="Coûts vecteur 2D", markevery=10)
+couts_ax.plot(i_list, couts_delta, '.', ls=':', color='r', fillstyle='none', label="Coûts RMSE à la main", markevery=10)
+couts_ax.plot(i_list, couts_mse, '.', ls=':', color='b', fillstyle='none', label="Coûts MSE", markevery=10)
+couts_ax.plot(i_list, couts_rmse, '.', ls=':', color='g', fillstyle='none', label="Coûts RMSE", markevery=10)
 couts_ax.set_xlabel(r'$i$')
 couts_ax.set_ylabel("Coûts")
 couts_ax.legend()
+
+# Plot du taux d'appentissage
+app_ax.set_title("Taux d'appentissage")
+app_ax.plot(i_list, eta_list, '.', ls=':', color='b', fillstyle='none', label="Taux d'appentissage", markevery=10)
+app_ax.set_xlabel(r'$i$')
+app_ax.set_ylabel(r'$\eta$', rotation=0)
+# app_ax.legend()
 
 plt.show()
 
 # Performances
Binary file not shown. Before: 458 KiB, after: 452 KiB.
BIN fondamentaux/img/03-descente_gradient_mini-lots.png (new file, not shown). Size: 440 KiB.
Binary file not shown. Before: 425 KiB, after: 462 KiB.