diff --git a/Regression/Couples.py b/Regression/Couples.py
index 29d0a7c..6aa79d3 100644
--- a/Regression/Couples.py
+++ b/Regression/Couples.py
@@ -12,13 +12,13 @@ import tqdm
 from scipy.optimize import least_squares
 from itertools import combinations
+import pickle
 import warnings

 warnings.filterwarnings("ignore", category=DeprecationWarning)

 # data preprocessing
-
 Data = pd.read_excel('Gnielinski Noise.xlsx', index_col = 0)

 train_df, test_df = train_test_split(Data, test_size=0.15, random_state=0)
@@ -27,9 +27,10 @@ y_train = train_df.iloc[:, -1]
 y_test = test_df.iloc[:, -1]

+#Let the normalization layer choose how to normalize data on the training set
-normalizer = tf.keras.layers.Normalization(axis=-1)
-normalizer.adapt(np.array(X_train)) # Let the normalization layer choose how to normalize data on the training set
+normalizer = tf.keras.layers.Normalization(axis=-1)
+normalizer.adapt(np.array(X_train))

 def build_and_compile_model(norm): # Define the model
     model = Sequential([
@@ -46,20 +47,42 @@ def build_and_compile_model(norm): #
         Dense(1, activation = 'linear')
     ])

-    model.compile(loss = 'mean_absolute_error',
-                  optimizer = Adam(0.001))
+    model.compile(loss='mean_absolute_error',
+                  optimizer=tf.keras.optimizers.Adam(0.001))
     return model

-es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200) # Use early stopping regularization
-model = build_and_compile_model(normalizer) # Build the model (fitting can be avoided if the model has already been trained and saved)
+# Callbacks
+es = EarlyStopping(monitor = 'val_loss', mode = 'min',
+                   verbose = 1, patience = 50)
+
+checkpoint_filepath = './tmp/checkpoint_gniel'
+model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
+    filepath = checkpoint_filepath,
+    save_weights_only = True,
+    monitor = 'val_loss',
+    mode = 'min',
+    save_best_only = True)
+
+model = build_and_compile_model(normalizer) # Build the model

 #time
 history = model.fit(
     X_train,
     y_train, validation_split=0.15,
-    verbose=2, epochs=500, callbacks=[es])
+    verbose=2, epochs=500,
+    callbacks = [es, model_checkpoint_callback])
+
+with open('GnielHistoryDict', 'wb') as file_pi:
+    pickle.dump(history.history, file_pi) # Save the model history
+
+# Load saved weights (best model)
+model.load_weights(checkpoint_filepath)
+model.save('gnielinski_model.tf') # Save the weights of the best model for later use
+
+with open('GnielHistoryDict', "rb") as file_pi:
+    history = pickle.load(file_pi)

 test_predictions = model.predict(X_test).flatten()
 test_labels = y_test.values # True y over samples of the testing set
@@ -89,21 +112,32 @@ def build_and_compile_model(norm): #
     xy=(0.02 * delta, 0.75 * delta),
     xytext=(0.02 * delta, 0.75 * delta),
 )
-ax1.set_xlabel(r"True y")
-ax1.set_ylabel(r"Predicted y")
+ax1.set_xlabel(r"True $\overline{\rm{Nu}}$")
+ax1.set_ylabel(r"Predicted $\overline{\rm{Nu}}$")
+ax1.text(-0.3, 0.9, 'a', transform=ax1.transAxes,
+         size=12, weight='bold')

-ax2.plot(history.history['loss'], label='loss')
-ax2.plot(history.history['val_loss'], label='validation loss')
+ax2.plot(history['loss'], label='loss')
+ax2.plot(history['val_loss'], label='validation loss')
+ax2.set_yscale('log')
+ax2.text(-0.35, 0.9, 'b', transform=ax2.transAxes,
+         size=12, weight='bold')

 ax2.set_xlabel('Epoch')
 ax2.set_ylabel('MAE')
 ax2.legend(frameon = False)
 plt.subplots_adjust(wspace=0.4)
+
+plt.savefig('gniel_model.svg', bbox_inches='tight')
+
 plt.show()
+
+###############################################################################
 #starting feature grouping analysis
-new_model = tf.keras.models.load_model('gnielinski_model.tf') # alternatively, load saved model
+
+model = tf.keras.models.load_model('gnielinski_model.tf') # alternatively, load saved model

 other_columns = Data.iloc[:, :-1].columns
@@ -158,7 +192,6 @@ def delta(lista, Data, i): #
     lista.append([ll[i][0], ll[i][1][::-1]])

-
 N = 20 # number of points evaluated per iteration
 repeat = 3 # number of iterations
@@ -173,6 +206,7 @@ def delta(lista, Data, i): #
     c = -0.5*np.ones(N)
     d = 0.5*np.ones(N)

+
     for j in range(repeat):
         beta = np.array([np.mean(a), np.mean(b), np.mean(c), np.mean(d)]) # initial guess for successive iterations
@@ -237,22 +271,22 @@ def MAIN(beta):
         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds1 = new_model(x0) # to differentiate the function
+            preds1 = model(x0) # to differentiate the function
         dy_dx1 = tape.gradient(preds1, x0)[index_1] # evaluate the partial derivative of the model in x0 with respect to the first feature

         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds2 = new_model(x0)
+            preds2 = model(x0)
         dy_dx2 = tape.gradient(preds2, x0)[index_2]

         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds3 = new_model(x0)
+            preds3 = model(x0)
         dy_dx3 = tape.gradient(preds3, x0)[index_3]

         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds4 = new_model(x0)
+            preds4 = model(x0)
         dy_dx4 = tape.gradient(preds4, x0)[index_4]

         grad1 = dy_dx1.numpy() # convert to array
@@ -271,14 +305,14 @@ def MAIN(beta):
         un = univec(a0, b0, c0, d0, x0.numpy(), index_1, index_2, index_3, index_4) # get un to check for the condition of invariance (components of the gradient aligned with un in x0)

-        DIFF = np.array([np.dot(der, un),
+        DIFF = np.array([np.dot(der, un)[0],
                          a0**2 + b0**2 - 1,
                          c0**2 + d0**2 - 1], dtype = float) # evaluate the resiudal
         return(DIFF)

     x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16,
-                      verbose = 0, max_nfev = 50) # least-squares for rectangular matrices
+                      verbose = 2, max_nfev = 20) # least-squares for rectangular matrices

     a[k] = x.x[0]
     b[k] = x.x[1]
@@ -302,7 +336,8 @@ def MAIN(beta):
 frame = pd.DataFrame(array, columns = ['mean a', 'std a', 'mean b',
                                        'std b', 'mean c', 'std c', 'mean d',
-                                       'std d']) # convert to dataframe for simplicity
+                                       'std d'],
+                     index = lista) # convert to dataframe for simplicity

 # scatter plots
 for ind in range(len(A)):
diff --git a/Regression/Dittus_simple.py b/Regression/Dittus_simple.py
index 8def45f..d7983ec 100644
--- a/Regression/Dittus_simple.py
+++ b/Regression/Dittus_simple.py
@@ -12,6 +12,7 @@ import tqdm
 from scipy.optimize import broyden1, least_squares
 from itertools import combinations, product
+import pickle
 import warnings

 warnings.filterwarnings("ignore", category=DeprecationWarning)
@@ -63,7 +64,18 @@ def build_and_compile_model(norm): #
                   optimizer=tf.keras.optimizers.Adam(0.001))
     return model

-es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200) # Use early stopping regularization
+# Callbacks
+es = EarlyStopping(monitor = 'val_loss', mode = 'min',
+                   verbose = 1, patience = 50)
+
+checkpoint_filepath = './tmp/checkpoint_dittus'
+model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
+    filepath = checkpoint_filepath,
+    save_weights_only = True,
+    monitor = 'val_loss',
+    mode = 'min',
+    save_best_only = True)
+
 model = build_and_compile_model(normalizer) # Build the model (fitting can be avoided if the model has already been trained and saved)

 #time
@@ -71,14 +83,24 @@ def build_and_compile_model(norm): #
     X_train,
     y_train, validation_split=0.15,
-    verbose=2, epochs=500, callbacks=[es])
+    verbose=2, epochs=500,
+    callbacks = [es, model_checkpoint_callback])
+
+
+with open('DittusHistoryDict', 'wb') as file_pi:
+    pickle.dump(history.history, file_pi) # Save the model history
+
+# Load saved weights (best model)
+model.load_weights(checkpoint_filepath)
+model.save('dittus_model.tf') # Save the weights of the best model for later use
+
+with open('DittusHistoryDict', "rb") as file_pi:
+    history = pickle.load(file_pi)

 test_predictions = model.predict(X_test).flatten()
 test_labels = y_test.values # True y over samples of the testing set

-model.save('dittus_model.tf') # Save the model for later use
-
 r2 = r2_score(test_labels, test_predictions)
 mae = mean_absolute_error(test_labels, test_predictions)
 rmse = np.sqrt(mean_squared_error(test_labels, test_predictions)) # Goodness metrics evaluation
@@ -109,8 +131,9 @@ def build_and_compile_model(norm): #
 ax1.text(-0.3, 0.9, 'a', transform=ax1.transAxes,
          size=12, weight='bold')

-ax2.plot(history.history['loss'], label='loss')
-ax2.plot(history.history['val_loss'], label='validation loss')
+ax2.plot(history['loss'], label='loss')
+ax2.plot(history['val_loss'], label='validation loss')
+ax2.set_yscale('log')
 ax2.text(-0.3, 0.9, 'b', transform=ax2.transAxes,
          size=12, weight='bold')
@@ -120,13 +143,14 @@ def build_and_compile_model(norm): #
 plt.subplots_adjust(wspace=0.4)

-plt.savefig('model.svg', bbox_inches='tight')
+plt.savefig('dittus_model.svg', bbox_inches='tight')

 plt.show()

+###############################################################################
 #starting feature grouping analysis

-new_model = tf.keras.models.load_model('dittus_model.tf') # alternatively, load saved model
+model = tf.keras.models.load_model('dittus_model.tf') # load saved model

 other_columns = X_train.columns
@@ -216,14 +240,14 @@ def MAIN(beta):
         x0 = tf.constant(tx) # create x0
         un = univec(a0, b0, x0.numpy(), index_1, index_2) # get un to check for the condition of invariance (components of the gradient aligned with un in x0)

-        DIFF = np.array([np.dot(der, un),
+        DIFF = np.array([np.dot(der, un)[0],
                          a0**2 + b0**2 - 1], dtype = float) # evaluate the resiudal
         return(DIFF)

     x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16,
-                      verbose = 0, max_nfev = 50) # least-squares with Levenberg-Marquardt does not work with rectangular matrices
+                      verbose = 1, max_nfev = 50) # least-squares with Levenberg-Marquardt does not work with rectangular matrices

     x.x = x.x/np.linalg.norm(x.x)
     a[k] = x.x[0] # save the results for each of the 100 iterations
@@ -250,6 +274,8 @@ def MAIN(beta):
     plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
     plt.ylim(-1,1)
     plt.show()
+
+
 ###############################################################################
@@ -371,7 +397,7 @@ def MAIN(beta):
     x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16,
-                      verbose = 0, max_nfev = 50) # least-squares for rectangular matrices
+                      verbose = 1, max_nfev = 50) # least-squares for rectangular matrices

     x.x = x.x/np.linalg.norm(x.x)
     a[k] = x.x[0] # save the results for each of the N iterations
@@ -403,4 +429,4 @@ def MAIN(beta):
     plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
     plt.ylim(-1,1)
     plt.show()
-
+
\ No newline at end of file
diff --git a/Regression/Gniel_simple.py b/Regression/Gniel_simple.py
index fcb5920..e61972b 100644
--- a/Regression/Gniel_simple.py
+++ b/Regression/Gniel_simple.py
@@ -12,6 +12,7 @@ import tqdm
 from scipy.optimize import broyden1, least_squares
 from itertools import combinations, product
+import pickle
 import warnings

 warnings.filterwarnings("ignore", category=DeprecationWarning)
@@ -63,22 +64,42 @@ def build_and_compile_model(norm): #
                   optimizer=tf.keras.optimizers.Adam(0.001))
     return model

-es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200) # Use early stopping regularization
-model = build_and_compile_model(normalizer) # Build the model (fitting can be avoided if the model has already been trained and saved)
+# Callbacks
+es = EarlyStopping(monitor = 'val_loss', mode = 'min',
+                   verbose = 1, patience = 50)
+
+checkpoint_filepath = './tmp/checkpoint_gniel'
+model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
+    filepath = checkpoint_filepath,
+    save_weights_only = True,
+    monitor = 'val_loss',
+    mode = 'min',
+    save_best_only = True)
+
+model = build_and_compile_model(normalizer) # Build the model

 #time
 history = model.fit(
     X_train,
     y_train, validation_split=0.15,
-    verbose=2, epochs=500, callbacks=[es])
+    verbose=2, epochs=500,
+    callbacks = [es, model_checkpoint_callback])
+
+with open('GnielHistoryDict', 'wb') as file_pi:
+    pickle.dump(history.history, file_pi) # Save the model history
+
+# Load saved weights (best model)
+model.load_weights(checkpoint_filepath)
+model.save('gnielinski_model.tf') # Save the weights of the best model for later use
+with open('GnielHistoryDict', "rb") as file_pi:
+    history = pickle.load(file_pi)
+
 test_predictions = model.predict(X_test).flatten()
 test_labels = y_test.values # True y over samples of the testing set

-model.save('gnielinski_model.tf') # save model for later use
-
 r2 = r2_score(test_labels, test_predictions)
 mae = mean_absolute_error(test_labels, test_predictions)
 rmse = np.sqrt(mean_squared_error(test_labels, test_predictions)) # goodness metrics
@@ -109,9 +130,10 @@ def build_and_compile_model(norm): #
 ax1.text(-0.3, 0.9, 'a', transform=ax1.transAxes,
          size=12, weight='bold')

-ax2.plot(history.history['loss'], label='loss')
-ax2.plot(history.history['val_loss'], label='validation loss')
-ax2.text(-0.3, 0.9, 'b', transform=ax2.transAxes,
+ax2.plot(history['loss'], label='loss')
+ax2.plot(history['val_loss'], label='validation loss')
+ax2.set_yscale('log')
+ax2.text(-0.35, 0.9, 'b', transform=ax2.transAxes,
          size=12, weight='bold')

 ax2.set_xlabel('Epoch')
@@ -120,13 +142,14 @@ def build_and_compile_model(norm): #
 plt.subplots_adjust(wspace=0.4)

-plt.savefig('model.svg', bbox_inches='tight')
+plt.savefig('gniel_model.svg', bbox_inches='tight')

 plt.show()

+###############################################################################
 #starting feature grouping analysis

-new_model = tf.keras.models.load_model('gnielinski_model.tf') # alternatively, load saved model instead of training a new one
+model = tf.keras.models.load_model('gnielinski_model.tf') # load saved model instead of training a new one

 other_columns = X_train.columns
@@ -216,14 +239,14 @@ def MAIN(beta):
         x0 = tf.constant(tx) # create x0
         un = univec(a0, b0, x0.numpy(), index_1, index_2) # get un to check for the condition of invariance (components of the gradient aligned with un in x0)

-        DIFF = np.array([np.dot(der, un),
+        DIFF = np.array([np.dot(der, un)[0],
                          a0**2 + b0**2 - 1], dtype = float) # evaluate the resiudal
         return(DIFF)

     x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16,
-                      verbose = 2, max_nfev = 50) # least-squares for rectangular matrices
+                      verbose = 1, max_nfev = 50) # least-squares for rectangular matrices

     x.x = x.x/np.linalg.norm(x.x)
     a[k] = x.x[0] # save the results for each of the N iterations
@@ -251,4 +274,4 @@ def MAIN(beta):
     plt.show()
     filename = str(l[ind]) + '.svg'
     plt.savefig(filename)
-
+
\ No newline at end of file
diff --git a/Regression/Gravitational.py b/Regression/Gravitational.py
index 32662ca..5bffb10 100644
--- a/Regression/Gravitational.py
+++ b/Regression/Gravitational.py
@@ -13,6 +13,7 @@ from scipy.optimize import least_squares
 from sklearn.preprocessing import StandardScaler
 from itertools import combinations
+import pickle
 import warnings

 warnings.filterwarnings("ignore", category=DeprecationWarning)
@@ -66,7 +67,18 @@ def build_and_compile_model(norm): #
                   optimizer = Adam(0.001))
     return model

-es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200) # Use early stopping regularization
+# Callbacks
+es = EarlyStopping(monitor = 'val_loss', mode = 'min',
+                   verbose = 1, patience = 50)
+
+checkpoint_filepath = './tmp/checkpoint_newton'
+model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
+    filepath = checkpoint_filepath,
+    save_weights_only = True,
+    monitor = 'val_loss',
+    mode = 'min',
+    save_best_only = True)
+
 model = build_and_compile_model(normalizer) # Build the model

 #time
@@ -74,14 +86,23 @@ def build_and_compile_model(norm): #
     X_train,
     y_train, validation_split=0.15,
-    verbose=2, epochs=500, callbacks=[es])
+    verbose=2, epochs=500,
+    callbacks = [es, model_checkpoint_callback])
+
+with open('NewtonHistoryDict', 'wb') as file_pi:
+    pickle.dump(history.history, file_pi) # Save the model history
+
+# Load saved weights (best model)
+model.load_weights(checkpoint_filepath)
+model.save('newton_model.tf') # Save the weights of the best model for later use
+with open('NewtonHistoryDict', "rb") as file_pi:
+    history = pickle.load(file_pi)
+
 test_predictions = model.predict(X_test).flatten()
 test_labels = y_test.values # True y over samples of the testing set

-model.save('newton_model.tf') # save model for later use
-
 r2 = r2_score(test_labels, test_predictions)
 mae = mean_absolute_error(test_labels, test_predictions)
 rmse = np.sqrt(mean_squared_error(test_labels, test_predictions)) # goodness metrics
@@ -112,8 +133,9 @@ def build_and_compile_model(norm): #
 ax1.text(-0.3, 0.9, 'a', transform=ax1.transAxes,
          size=12, weight='bold')

-ax2.plot(history.history['loss'], label='loss')
-ax2.plot(history.history['val_loss'], label='validation loss')
+ax2.plot(history['loss'], label='loss')
+ax2.plot(history['val_loss'], label='validation loss')
+ax2.set_yscale('log')
 ax2.text(-0.3, 0.9, 'b', transform=ax2.transAxes,
          size=12, weight='bold')
@@ -122,13 +144,14 @@ def build_and_compile_model(norm): #
 ax2.legend(frameon = False)
 plt.subplots_adjust(wspace=0.4)

-plt.savefig('model.svg', bbox_inches='tight')
+plt.savefig('newton_model.svg', bbox_inches='tight')

 plt.show()

+###############################################################################
 #starting feature grouping analysis

-new_model = tf.keras.models.load_model('newton_model.tf') # alternatively, load saved model
+model = tf.keras.models.load_model('newton_model.tf') # load saved model

 other_columns = Data.iloc[:,:-1].columns
@@ -198,12 +221,12 @@ def MAIN(beta):
         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds1 = new_model(x0) # to differentiate the DNN model
+            preds1 = model(x0) # to differentiate the DNN model
         dy_dx1 = tape.gradient(preds1, x0)[index_1] # evaluate the partial derivative of the model in x0 with respect to the first feature

         with tf.GradientTape(persistent=True) as tape:
             tape.watch(x0)
-            preds2 = new_model(x0)
+            preds2 = model(x0)
         dy_dx2 = tape.gradient(preds2, x0)[index_2]
@@ -219,14 +242,14 @@ def MAIN(beta):
         x0 = tf.constant(tx) # create x0
         un = univec(a0, b0) # get un to check for the condition of invariance (components of the gradient aligned with un in x0)

-        DIFF = np.array([np.dot(der, un),
+        DIFF = np.array([np.dot(der, un)[0],
                          a0**2 + b0**2 - 1], dtype = float) # evaluate the resiudal
         return(DIFF)

     x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16,
-                      verbose = 0, max_nfev = 50) # least-squares with Levenberg-Marquardt does not work with rectangular matrices
+                      verbose = 1, max_nfev = 50) # least-squares with Levenberg-Marquardt does not work with rectangular matrices

     x.x = x.x/np.linalg.norm(x.x)
     a[k] = x.x[0] # save the results for each of the N iterations
@@ -251,4 +274,4 @@ def MAIN(beta):
     plt.title(l[ind])
     plt.ylim(-1,1)
     plt.show()
-
+
\ No newline at end of file
diff --git a/Regression/Triple-couple.py b/Regression/Triple-couple.py
index 61ac07d..3ed68b3 100644
--- a/Regression/Triple-couple.py
+++ b/Regression/Triple-couple.py
@@ -12,13 +12,13 @@ import tqdm
 from scipy.optimize import least_squares
 from itertools import combinations, product
+import pickle
 import warnings

 warnings.filterwarnings("ignore", category=DeprecationWarning)

 # data preprocessing
-
 Data = pd.read_excel('Gnielinski Noise.xlsx', index_col = 0)

 train_df, test_df = train_test_split(Data, test_size=0.15, random_state=0)
@@ -27,9 +27,10 @@ y_train = train_df.iloc[:, -1]
 y_test = test_df.iloc[:, -1]

+#Let the normalization layer choose how to normalize data on the training set
-normalizer = tf.keras.layers.Normalization(axis=-1)
-normalizer.adapt(np.array(X_train)) # Let the normalization layer choose how to normalize data on the training set
+normalizer = tf.keras.layers.Normalization(axis=-1)
+normalizer.adapt(np.array(X_train))

 def build_and_compile_model(norm): # Define the model
     model = Sequential([
@@ -46,11 +47,22 @@ def build_and_compile_model(norm): #
         Dense(1, activation = 'linear')
     ])

-    model.compile(loss = 'mean_absolute_error',
-                  optimizer = Adam(0.001))
+    model.compile(loss='mean_absolute_error',
+                  optimizer=tf.keras.optimizers.Adam(0.001))
     return model

-es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200) # Use early stopping regularization
+# Callbacks
+es = EarlyStopping(monitor = 'val_loss', mode = 'min',
+                   verbose = 1, patience = 50)
+
+checkpoint_filepath = './tmp/checkpoint_gniel'
+model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
+    filepath = checkpoint_filepath,
+    save_weights_only = True,
+    monitor = 'val_loss',
+    mode = 'min',
+    save_best_only = True)
+
 model = build_and_compile_model(normalizer) # Build the model

 #time
@@ -58,9 +70,20 @@ def build_and_compile_model(norm): #
     X_train,
     y_train, validation_split=0.15,
-    verbose=2, epochs=500, callbacks=[es])
+    verbose=2, epochs=500,
+    callbacks = [es, model_checkpoint_callback])
+
+with open('GnielHistoryDict', 'wb') as file_pi:
+    pickle.dump(history.history, file_pi) # Save the model history
+
+# Load saved weights (best model)
+model.load_weights(checkpoint_filepath)
+model.save('gnielinski_model.tf') # Save the weights of the best model for later use
+with open('GnielHistoryDict', "rb") as file_pi:
+    history = pickle.load(file_pi)
+
 test_predictions = model.predict(X_test).flatten()
 test_labels = y_test.values # True y over samples of the testing set
@@ -89,21 +112,31 @@ def build_and_compile_model(norm): #
     xy=(0.02 * delta, 0.75 * delta),
     xytext=(0.02 * delta, 0.75 * delta),
 )
-ax1.set_xlabel(r"True y")
-ax1.set_ylabel(r"Predicted y") +ax1.set_xlabel(r"True $\overline{\rm{Nu}}$") +ax1.set_ylabel(r"Predicted $\overline{\rm{Nu}}$") +ax1.text(-0.3, 0.9, 'a', transform=ax1.transAxes, + size=12, weight='bold') -ax2.plot(history.history['loss'], label='loss') -ax2.plot(history.history['val_loss'], label='validation loss') +ax2.plot(history['loss'], label='loss') +ax2.plot(history['val_loss'], label='validation loss') +ax2.set_yscale('log') +ax2.text(-0.35, 0.9, 'b', transform=ax2.transAxes, + size=12, weight='bold') ax2.set_xlabel('Epoch') ax2.set_ylabel('MAE') ax2.legend(frameon = False) plt.subplots_adjust(wspace=0.4) + +plt.savefig('gniel_model.svg', bbox_inches='tight') + plt.show() + +############################################################################### #starting feature grouping analysis -new_model = tf.keras.models.load_model('gnielinski_model.tf') # alternatively, load saved model +model = tf.keras.models.load_model('gnielinski_model.tf') # load saved model other_columns = Data.iloc[:, :-1].columns @@ -180,7 +213,7 @@ def delta(lista, Data, i): # D = np.zeros([len(lista), N]) E = np.zeros([len(lista), N]) -for i in tqdm.tqdm(range(len(lista))): +for run in tqdm.tqdm(range(len(lista))): a = 0.6*np.ones(N) # initial guess for first iteration b = 0.6*np.ones(N) @@ -188,6 +221,7 @@ def delta(lista, Data, i): # d = 0.7*np.ones(N) e = -0.7*np.ones(N) + for j in range(repeat): beta = np.array([np.mean(a), np.mean(b), np.mean(c), np.mean(d), @@ -261,27 +295,27 @@ def MAIN(beta): with tf.GradientTape(persistent=True) as tape: tape.watch(x0) - preds1 = new_model(x0) # to differentiate the function + preds1 = model(x0) # to differentiate the function dy_dx1 = tape.gradient(preds1, x0)[index_1] # evaluate the partial derivative of the model in x0 with respect to the first feature with tf.GradientTape(persistent=True) as tape: tape.watch(x0) - preds2 = new_model(x0) + preds2 = model(x0) dy_dx2 = tape.gradient(preds2, x0)[index_2] with tf.GradientTape(persistent=True) as tape: tape.watch(x0) - preds3 = new_model(x0) + preds3 = model(x0) dy_dx3 = tape.gradient(preds3, x0)[index_3] with tf.GradientTape(persistent=True) as tape: tape.watch(x0) - preds4 = new_model(x0) + preds4 = model(x0) dy_dx4 = tape.gradient(preds4, x0)[index_4] with tf.GradientTape(persistent=True) as tape: tape.watch(x0) - preds5 = new_model(x0) + preds5 = model(x0) dy_dx5 = tape.gradient(preds5, x0)[index_5] grad1 = dy_dx1.numpy() # convert to array @@ -308,7 +342,7 @@ def MAIN(beta): return(DIFF) x = least_squares(MAIN, beta, method = 'trf', ftol = 2.3e-16, - verbose = 0, max_nfev = 50) # least-squares for rectangular matrices + verbose = 2, max_nfev = 20) # least-squares for rectangular matrices a[k] = x.x[0] b[k] = x.x[1] @@ -335,7 +369,8 @@ def MAIN(beta): frame = pd.DataFrame(array, columns = ['mean a', 'std a', 'mean b', 'std b', 'mean c', 'std c', 'mean d', - 'std d', 'mean e', 'std e']) # convert to dataframe for simplicity + 'std d', 'mean e', 'std e'], + index = lista) # convert to dataframe for simplicity # scatter plots for ind in range(len(A)): @@ -348,4 +383,4 @@ def MAIN(beta): #plt.title(lista[ind]) plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left') plt.ylim(-1,1) - plt.show() + plt.show() \ No newline at end of file