stream.py
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler

# train[['CCAvg','Income']] = scaler.fit_transform(train[['CCAvg','Income']])
st.write("""
A small app for fitting a logistic regression model
""")

st.sidebar.header('Add a dataset')
file = st.sidebar.file_uploader('Your dataset', type=['csv'])
if file is None:
    # Stop rendering until a CSV has been uploaded; pd.read_csv(None) would raise.
    st.stop()

test = pd.read_csv(file)

# Standardise the two numeric features used by the model.
scaler = StandardScaler()
test[['CCAvg', 'Income']] = scaler.fit_transform(test[['CCAvg', 'Income']])
class LogisticRegressionGradientDescent:
    """Logistic regression (without an intercept term) trained with batch gradient descent."""

    def __init__(self, learning_rate=0.01, num_iterations=1000):
        self.learning_rate = learning_rate
        self.num_iterations = num_iterations
        self.theta = None

    def sigmoid(self, z):
        return 1 / (1 + np.exp(-z))

    def fit(self, X, y):
        num_samples, num_features = X.shape
        self.theta = np.zeros(num_features)
        for _ in range(self.num_iterations):
            linear_model = np.dot(X, self.theta)
            y_pred = self.sigmoid(linear_model)
            # Gradient of the mean binary cross-entropy loss with respect to theta.
            gradient = np.dot(X.T, (y_pred - y)) / num_samples
            self.theta -= self.learning_rate * gradient

    def predict(self, X):
        linear_model = np.dot(X, self.theta)
        y_pred = self.sigmoid(linear_model)
        # Threshold the predicted probabilities at 0.5 to get class labels.
        y_pred_class = [1 if i > 0.5 else 0 for i in y_pred]
        return y_pred_class
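
# Note on the update rule above (standard logistic-regression calculus, not
# anything specific to this file): for the mean binary cross-entropy loss
#     J(theta) = -(1/m) * sum_i [ y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ],
# with p_i = sigmoid(x_i . theta), the gradient simplifies to
#     dJ/dtheta = (1/m) * X^T (p - y),
# which is exactly the expression computed inside fit() before the
# learning-rate step.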
model = LogisticRegressionGradientDescent()
model.fit(test[['CCAvg', 'Income']], test['Personal.Loan'])

# Training accuracy on the uploaded dataset.
accuracy = accuracy_score(test['Personal.Loan'], model.predict(test[['CCAvg', 'Income']]))
st.write({'Accuracy of our model': round(accuracy, 2)})
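
The app itself is launched with the standard Streamlit CLI (streamlit run stream.py) and expects the uploaded CSV to contain CCAvg, Income and Personal.Loan columns, as the code above assumes. As a quick sanity check on the gradient-descent class, the following is a minimal standalone sketch, assuming the LogisticRegressionGradientDescent definition above has been copied into a scratch script; the blob centres, seed and hyperparameters are illustrative assumptions, not values from stream.py:

import numpy as np

# Two well-separated Gaussian clouds; a linear boundary through the origin
# separates them, so the intercept-free model should classify them well.
rng = np.random.default_rng(0)
X0 = rng.normal(loc=-2.0, scale=1.0, size=(100, 2))   # class 0
X1 = rng.normal(loc=+2.0, scale=1.0, size=(100, 2))   # class 1
X = np.vstack([X0, X1])
y = np.array([0] * 100 + [1] * 100)

model = LogisticRegressionGradientDescent(learning_rate=0.1, num_iterations=2000)
model.fit(X, y)
preds = np.array(model.predict(X))
print("training accuracy:", (preds == y).mean())   # expected to be close to 1.0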