1
1
import numpy as np
2
2
from numpy .linalg import inv
3
3
4
def computeCost(x, y, theta):
    """Return the half mean-squared-error cost for linear regression.

    Parameters
    ----------
    x : ndarray
        Design matrix; predictions are formed as x.T @ theta, so columns
        are presumably samples and rows features — TODO confirm with callers.
    y : ndarray
        Target values, one per sample.
    theta : ndarray
        Parameter vector, one entry per feature.

    Returns
    -------
    float
        (1 / 2) * mean((x.T @ theta - y) ** 2)
    """
    residual = np.matmul(x.transpose(), theta) - y
    return np.mean(np.power(residual, 2)) / 2
9
10
10
11
11
12
def gradientDescent(x, y, theta, alpha, iterations):
    """Run batch gradient descent on the half-MSE linear-regression cost.

    Parameters
    ----------
    x : ndarray
        Design matrix; samples appear to be columns (m = x.shape[1],
        predictions are x.T @ theta) — TODO confirm with callers.
    y : ndarray
        Targets, one per sample.
    theta : ndarray
        Initial parameter vector.
    alpha : float
        Learning rate.
    iterations : int
        Number of update steps to perform.

    Returns
    -------
    list
        [theta, costHistory]; costHistory[i] is computeCost after update i.
    """
    n_samples = x.shape[1]
    costHistory = np.zeros(iterations)

    for step in range(iterations):
        residual = np.matmul(x.transpose(), theta) - y
        # np.dot(x, residual) is the gradient of the half-MSE cost w.r.t. theta.
        theta = theta - alpha * np.dot(x, residual) / n_samples
        costHistory[step] = computeCost(x, y, theta)

    return [theta, costHistory]
23
24
24
def normalize(x, mu, std):
    """Return the z-score of x given mean mu and standard deviation std."""
    shifted = x - mu
    return shifted / std
26
28
27
29
28
30
def featureNormalize(x):
    """Normalize each column (feature) of x to zero mean and unit variance.

    Parameters
    ----------
    x : ndarray
        Data matrix; statistics are taken along axis 0, so each column is
        treated as one feature.

    Returns
    -------
    list
        [x_norm, mu, sigma] where x_norm = (x - mu) / sigma, and mu / sigma
        are the per-column mean and (population) standard deviation.

    Notes
    -----
    NumPy broadcasting replaces the original ``np.vectorize(normalize)``
    call: ``vectorize`` is a Python-level per-element loop, while
    ``(x - mu) / sigma`` does the same elementwise work in native code.
    A constant column (sigma == 0) still divides by zero, as before.
    """
    mu = np.mean(x, axis=0)
    sigma = np.std(x, axis=0)
    x_norm = (x - mu) / sigma

    return [x_norm, mu, sigma]
35
38
36
39
def computeCostMulti(x, y, theta):
    """Half mean-squared-error cost for multivariate linear regression.

    Parameters
    ----------
    x : ndarray
        Design matrix with one sample per row (predictions are x @ theta).
    y : ndarray
        Targets, one per sample (m = y.shape[0]).
    theta : ndarray
        Parameter vector.

    Returns
    -------
    float
        sum((x @ theta - y) ** 2) / (2 * m)
    """
    n_samples = y.shape[0]
    residual = np.matmul(x, theta) - y
    return np.sum(residual ** 2) / (2 * n_samples)
40
44
41
45
def gradientDescentMulti(x, y, theta, alpha, num_iters):
    """Batch gradient descent for multivariate linear regression.

    Parameters
    ----------
    x : ndarray
        Design matrix with one sample per row (predictions are x @ theta).
    y : ndarray
        Targets, one per sample.
    theta : ndarray
        Initial parameter vector.
    alpha : float
        Learning rate.
    num_iters : int
        Number of update steps to perform.

    Returns
    -------
    list
        [theta, j_history]; j_history[i] is computeCostMulti after update i.
    """
    n_samples = y.shape[0]
    j_history = np.zeros(num_iters)

    for step in range(num_iters):
        residual = np.matmul(x, theta) - y
        # np.dot(residual, x) equals x.T @ residual: the cost gradient.
        theta = theta - alpha * np.dot(residual, x) / n_samples
        j_history[step] = computeCostMulti(x, y, theta)

    return [theta, j_history]
56
def normalEqn(X, Y):
    """Solve the normal equations (X'X) theta = X'Y for least squares.

    Parameters
    ----------
    X : ndarray
        Design matrix with one sample per row.
    Y : ndarray
        Targets, one per sample.

    Returns
    -------
    ndarray
        theta minimizing ||X @ theta - Y||^2.

    Notes
    -----
    Uses ``np.linalg.solve`` on the normal equations instead of explicitly
    forming ``inv(X'X)``: solving the linear system is both faster and
    numerically more stable than multiplying by a computed inverse.
    X'X must be non-singular (X full column rank) — the same requirement
    the original ``inv()`` call had. Also avoids rebinding the parameter X.
    """
    gram = np.matmul(X.transpose(), X)
    rhs = np.matmul(X.transpose(), Y)
    theta = np.linalg.solve(gram, rhs)
    return theta
0 commit comments