The teacher only wanted a .py file submitted, so a lot of the write-up ended up directly in the comments.
# -*- coding: utf-8 -*-
# Numerical Computation & Optimization
# homework1: LU decomposition
# cww97
# 2017/09/27
# from cww97jh@gmail.com
# to num_com_opt@163.com
from numpy import *
n = 100
def lu(A):
    """Doolittle decomposition (course book page 45).
    I just copied the formulas from the course book.
    :param A: the coefficient matrix
    :return: L, U -- the lower and upper triangular factors
    """
    n = A.shape[0]
    L = mat(eye(n))         # lower triangular matrix
    U = mat(zeros((n, n)))  # upper triangular matrix
    for k in range(n):             # k-th elimination step
        for i in range(k, n):      # k-th row of U
            U[k, i] = A[k, i] - sum(L[k, j] * U[j, i] for j in range(k))
        for i in range(k + 1, n):  # k-th column of L
            L[i, k] = (A[i, k] - sum(L[i, j] * U[j, k] for j in range(k))) / U[k, k]
    return L, U
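# A quick sanity check I could run while debugging (my own addition, not part of
# the submitted homework): the factors should multiply back to A and be triangular.
def _check_lu():
    A_test = mat(random.randint(1, 10, size=(4, 4))) + 50 * mat(eye(4))
    L_test, U_test = lu(A_test)
    assert allclose(L_test * U_test, A_test)
    assert allclose(L_test, tril(L_test)) and allclose(U_test, triu(U_test))
# (call _check_lu() by hand while debugging)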
def calc(A, B):
    """Solve the equation A * X = B (course book page 46).
    I just copied the formulas from the course book.
    :param A: the coefficient matrix
    :param B: the right-hand side, B = A * X
    :return: X, the solution of A * X = B
    """
    L, U = lu(A)
    n = len(B)
    # first, solve L * Y = B for Y (forward substitution)
    Y = mat(zeros((n, 1)))
    for k in range(n):
        Y[k, 0] = B[k, 0] - sum(L[k, j] * Y[j, 0] for j in range(k))
    # then, solve U * X = Y for X (back substitution)
    X = mat(zeros((n, 1)))
    for k in range(n - 1, -1, -1):
        X[k, 0] = (Y[k, 0] - sum(U[k, j] * X[j, 0] for j in range(k + 1, n))) / U[k, k]
    return X
""" sample data on course book, for debug A = mat([[2, 1, 5], [4, 1, 12], [-2, -4, 5]]) B = mat([[11], [27], [12]]) """
if __name__ == '__main__':
    # generate a random matrix M
    M = mat(random.randint(1, 100, size=(n, n)))
    # A = M + I (identity matrix)
    A = M + mat(eye(n))
    print('---------matrix A:----------\n', A)
    # generate a vector X = (1, 2, ..., 100).T
    X = mat([[i + 1] for i in range(n)])
    print('---------vector X:----------\n', X.T, '.T')
    # generate the vector B as B = A * X
    B = A * X
    print('-------B = A * X:-----------\n', B.T, '.T')
    x = calc(A, B)
    print('calc the equation A * x = B, x:\n', x.T, '.T')
""" finished this, when I need to calc some equation, I think I may more likely to use this: x = np.linalg.solve(A, b) However, If I use this, this homework may cannot pass >_
Python matrix operations
TAGS: Numerical Computation
Numerical Computation & Optimization    Instructor: 王祥丰
陈伟文 10152510217
def get_data():
    # generate the tridiagonal matrix A: 2 on the diagonal, -1 just off it
    A = mat(zeros((N, N)))
    for i in range(N):
        A[i, i] = 2
    for i in range(N - 1):
        A[i, i + 1] = -1
        A[i + 1, i] = -1
    print('---------matrix A:----------\n', A)
    # generate the vector b = (1, 1, ..., 1).T
    b = mat(ones((N, 1)))
    print('---------vector b:----------\n', b.T, '.T')
    return A, b
For the stopping criterion I looked up vector norm and matrix norm on the wiki.
Core algorithm
Based on the last formula, I wrote the following code:
def norm(x):
    # Euclidean norm of a column vector
    ans = 0
    for t in x:
        ans += square(t[0, 0])
    return sqrt(ans)
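(In hindsight this just re-implements numpy; a quick check, with an arbitrary random column vector, that the hand-written norm matches the built-in one:)

v = mat(random.rand(5, 1))
print(norm(v), linalg.norm(v))  # the two values should agree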
def jacobi(A, b):
    n = len(b)
    x0 = mat(zeros((n, 1)))
    x1 = mat(zeros((n, 1)))
    d = 1
    ti = 0
    while d > eps:
        for i in range(n):
            tmp = 0
            for j in range(n):
                if j != i:
                    tmp += A[i, j] * x0[j, 0]
            x1[i, 0] = 1. / A[i, i] * (b[i, 0] - tmp)
            # print(x1.T)
        x0 = x1
        d = 1. * norm(A * x0 - b) / norm(b)
        ti += 1
        print('time = %d, d = %lf' % (ti, d))
    return x1
It ran for a long, long, long time... I asked 主席 how long his run took, and why it ran so much faster. I peeked at his code and found that he did not compute one entry at a time like I did; he used numpy's built-in matrix operations. Great, I had reinvented the wheel again. So I decided to rename the function above slow_jacobi, in memory of that silly move. To be fair, after ten-odd minutes it did produce a result: slow to run, but the iteration count was low. Still, code this slow is intolerable, so I rewrote it using this formula:
def jacobi(A, b):  # lesson learned: use matrix operations this time
    # x^(k+1) = D^-1 (b - R x^k),  with D = diag(A), R = A - D
    n = len(b)
    x0 = mat(zeros((n, 1)))
    x1 = mat(zeros((n, 1)))
    D = mat(diag(diag(asarray(A))))  # diagonal part of A
    R = A - D
    norm_b = linalg.norm(b)
    d = 1; ti = 0
    while d > eps:
        x1 = D.I * (b - R * x0)
        x0 = x1; ti += 1
        d = linalg.norm(A * x0 - b) / norm_b
        print('time = %d, delta = %.8lf' % (ti, d))
    return x0
More iterations this way, but it reaches the solution within 10 seconds.
Don't reinvent the wheel. Don't reinvent the wheel. Don't reinvent the wheel.
Another round of nerve-racking formula copying (I got lazy and just pasted the slides). Let me at least copy the core iteration. This iteration can update x using x itself (a component-wise sketch of that follows after the code below).
def gauss_seidel(A, b):
    # x^{k+1} = B_G x^k + f_G
    # B_G = (D - L)^{-1} U,  f_G = (D - L)^{-1} b   (with A = D - L - U)
    n = len(b)
    D = mat(diag(diag(asarray(A))))
    U = mat(zeros((n, n))) - triu(A, 1)
    L = mat(zeros((n, n))) - tril(A, -1)
    bg = (D - L).I * U
    fg = (D - L).I * b
    norm_b = linalg.norm(b)
    x = mat(zeros((n, 1)))
    d = 1; ti = 0
    while d > eps:
        x = bg * x + fg
        ti += 1
        d = linalg.norm(A * x - b) / norm_b
        print('time = %d, delta = %.8lf' % (ti, d))
    return x
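To make "update x using x itself" concrete, here is a minimal component-wise sketch (my own illustration, not part of the submitted code) that overwrites the entries of x in place, reusing the eps and stopping rule from above:

def gauss_seidel_inplace(A, b):
    # entries x[j] with j < i already hold the new values when x[i] is computed
    n = len(b)
    x = mat(zeros((n, 1)))
    norm_b = linalg.norm(b)
    while linalg.norm(A * x - b) / norm_b > eps:
        for i in range(n):
            s = sum(A[i, j] * x[j, 0] for j in range(n) if j != i)
            x[i, 0] = (b[i, 0] - s) / A[i, i]
    return x

(Of course this is as slow per sweep as slow_jacobi; the matrix form above is what I actually use.)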
The iteration count dropped a lot.
Copying from the slides again for SOR. On choosing the relaxation factor ω, I found a paper that reads like a lab report, implemented in C [4], and the Hudong Baike entry has a short description too [5]. It seems this ω has to be chosen with some care.
def sor(A, b, w):
    # x^{k+1} = B_w x^k + f_w
    # B_w = (D - wL)^{-1} [(1 - w) D + w U]
    # f_w = w (D - wL)^{-1} b
    n = len(b)
    D = mat(diag(diag(asarray(A))))
    U = mat(zeros((n, n))) - triu(A, 1)
    L = mat(zeros((n, n))) - tril(A, -1)
    bw = (D - w * L).I * ((1 - w) * D + w * U)
    fw = w * (D - w * L).I * b
    norm_b = linalg.norm(b)
    x = mat(zeros((n, 1)))
    d = 1; ti = 0
    while d > eps:
        x = bw * x + fw
        ti += 1
        d = linalg.norm(A * x - b) / norm_b
    print('w = %.2lf, time = %d' % (w, ti))
    return x
By this point I no longer cared whether it produces the correct solution (it certainly does). To see the effect of w, I wrote the following loop:

w = 1.
while w <= 2:
    w += 0.1
    x = sor(A, b, w)
output:
w = 1.10, time = 11597
w = 1.20, time = 9448
w = 1.30, time = 7630
w = 1.40, time = 6071
w = 1.50, time = 4719
w = 1.60, time = 3536
w = 1.70, time = 2489
w = 1.80, time = 1554
w = 1.90, time = 696
Is it simply the bigger the faster? I changed the loop:

w = 1.8
while w < 2:
    w += 0.01
    x = sor(A, b, w)
which gave the following output:
w = 1.81, time = 1466
w = 1.82, time = 1378
w = 1.83, time = 1292
w = 1.84, time = 1205
w = 1.85, time = 1120
w = 1.86, time = 1035
w = 1.87, time = 950
w = 1.88, time = 866
w = 1.89, time = 781
w = 1.90, time = 696
w = 1.91, time = 609
w = 1.92, time = 520
w = 1.93, time = 423
w = 1.94, time = 293
w = 1.95, time = 303
w = 1.96, time = 404
w = 1.97, time = 505
w = 1.98, time = 808
w = 1.99, time = 1515
I don't think there is any need to keep enumerating. All I can say is that for this problem, w around 1.94 works best; a quick theoretical cross-check follows.
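If I remember the formula right (my own cross-check, not from the slides): for this tridiagonal A the Jacobi iteration matrix has spectral radius ρ = cos(π/(N+1)), and for a consistently ordered matrix the optimal SOR factor is ω_opt = 2 / (1 + sqrt(1 - ρ²)). Plugging in N = 100:

from math import cos, pi, sqrt

N = 100
rho = cos(pi / (N + 1))               # spectral radius of the Jacobi iteration matrix
w_opt = 2 / (1 + sqrt(1 - rho ** 2))  # classical optimal SOR relaxation factor
print('w_opt = %.4f' % w_opt)         # prints roughly 1.9397

which agrees with the 1.94 found by brute force above.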
[1] Lecture-3 course slides
[2] wiki_jacobi
[3] 数值计算——线性方程组的迭代法 (Numerical computation: iterative methods for linear systems)
[4] 胡枫, 于福溪. 《超松弛迭代法中松弛因子ω的选取方法》 (How to choose the relaxation factor ω in SOR). 青海师范大学学报, 2006, pp. 42-46
[5] 互动百科_松弛法 (Hudong Baike: relaxation method)
[6] numpy docs: triu (upper triangular matrices)
[7] numpy docs: norm
陈伟文 10152510217
2017/10/19
Solve the equation Ax = b with the Conjugate Gradient method and the QR method, respectively.
The Chinese wiki entry on conjugate gradients has only a few sentences; the English one is much more detailed. (Slides pasted here.)
Looking at the logic of each loop iteration, it is clear that x, r and p each need only a single variable to carry the loop:
def conjugate_gradient(A, b):
    n = len(b)
    norm_b = linalg.norm(b)
    # generate x0, r0, p0
    x = mat(zeros((n, 1)))
    r = b - A * x; p = r.copy()  # 1
    # here we go
    d = 1; ti = 0
    while d > eps:  # 2
        ap = A * p  # 3
        a = float(r.T * r) / float(p.T * ap)
        x += a * p   # 4
        r -= a * ap  # 5
        # 6: beta = -(r_new.T * A * p) / (p.T * A * p)
        p = r - float(r.T * ap) / float(p.T * ap) * p
        # for counting
        ti += 1
        d = linalg.norm(A * x - b) / norm_b
        print('time = %d, d = %f' % (ti, d))
    return x
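A quick way to exercise it (a hypothetical snippet, assuming the same tridiagonal get_data() as in the previous homework): run it and compare against numpy's direct solver:

A, b = get_data()
x_cg = conjugate_gradient(A, b)
x_ref = mat(linalg.solve(A, b))               # reference solution from numpy
print('max error:', abs(x_cg - x_ref).max())  # should be tiny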
By now I have stopped worrying about whether the solution it produces is correct (it certainly is); what matters is the iteration count.
QR: formulas pasted first. Compute Q first, then R (tip: this R is especially easy to get wrong), and finally solve Rx = Q^T b:
def QR(A, b):
    n = len(b)
    # calc Q (classical Gram-Schmidt)
    R = mat(zeros((n, n)))
    R[0, 0] = linalg.norm(A[:, 0])
    Q = A.copy()
    Q[:, 0] = A[:, 0] / linalg.norm(A[:, 0])
    for j in range(1, n):
        for i in range(j):
            Q[:, j] -= float(A[:, j].T * Q[:, i]) * Q[:, i]
        R[j, j] = linalg.norm(Q[:, j])
        Q[:, j] /= linalg.norm(Q[:, j])
    # calc R
    for i in range(n):
        for j in range(i + 1, n):
            R[i, j] = float(A[:, j].T * Q[:, i])
    # calc x from R x = Q^T * b by back substitution
    b = Q.T * b
    x = mat(zeros((n, 1)))
    for i in range(n - 1, -1, -1):
        x[i] = (b[i] - R[i, i+1:] * x[i+1:]) / R[i, i]
    return x
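To convince myself the factors came out right (again my own check, assuming the same get_data() as before), I can look at the residual of the computed solution and compare it with numpy's solver:

A, b = get_data()
x_qr = QR(A, b)
print('residual ||Ax - b|| =', linalg.norm(A * x_qr - b))   # should be close to 0
print('max diff vs linalg.solve:', abs(x_qr - mat(linalg.solve(A, b))).max())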
Output, rounded to integers:
calc the equation A * x = B, x:
[[ 50. 99. 147. 194. 240. 285. 329. 372. 414. 455.
495. 534. 572. 609. 645. 680. 714. 747. 779. 810.
840. 869. 897. 924. 950. 975. 999. 1022. 1044. 1065.
1085. 1104. 1122. 1139. 1155. 1170. 1184. 1197. 1209. 1220.
1230. 1239. 1247. 1254. 1260. 1265. 1269. 1272. 1274. 1275.
1275. 1274. 1272. 1269. 1265. 1260. 1254. 1247. 1239. 1230.
1220. 1209. 1197. 1184. 1170. 1155. 1139. 1122. 1104. 1085.
1065. 1044. 1022. 999. 975. 950. 924. 897. 869. 840.
810. 779. 747. 714. 680. 645. 609. 572. 534. 495.
455. 414. 372. 329. 285. 240. 194. 147. 99. 50.]] .T
[1] numpy.dot — computing matrix products
[2] common errors with dot
[3] numpy linear algebra
[4] numpy row and column operations