math

Linear Programming

from scipy import optimize
import numpy as np


# scipy.optimize.linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs', callback=None,
# options=None, x0=None, integrality=None)
# c is the coefficient vector of the objective function; A_ub and b_ub give the coefficients of the
# inequality constraints; A_eq and b_eq give the coefficients of the equality constraints;
# bounds gives the lower and upper bound of each decision variable.
# By default linprog minimizes, and the inequalities are of the form <=.

if __name__ == '__main__':
    # Example: maximize z = -2*x1 - 3*x2 + 5*x3
    # subject to: 2*x1 - 5*x2 + x3 >= 10 (written as -2*x1 + 5*x2 - x3 <= -10),
    #             x1 + 3*x2 + x3 <= 12, and x1 + x2 + x3 = 7
    c = np.array([-2, -3, 5])
    A = np.array([[-2, 5, -1], [1, 3, 1]])
    B = np.array([-10, 12])
    Aeq = np.array([[1, 1, 1]])
    Beq = np.array([7])
    res = optimize.linprog(-c, A, B, Aeq, Beq)  # negate c to turn the maximization into a minimization
    print(res)

Result:

     con: array([1.52631919e-07])
     fun: -14.000000657683216
 message: 'Optimization terminated successfully.'
     nit: 5
   slack: array([-4.30601382e-07, 5.00000013e+00])
  status: 0
 success: True
       x: array([2.99999979e+00, 1.04988686e-08, 4.00000005e+00])

This problem asks for a maximum. Since linprog minimizes by default, we simply negate the objective coefficients and then negate the resulting minimum.

So the maximum for this problem is 14.000000657683216.

# Example: minimize -x1 + 4*x2
# subject to: -3*x1 + x2 <= 6, x1 + 2*x2 <= 4, x2 >= -3 (x1 unbounded)
C = [-1, 4]
A = [[-3, 1], [1, 2]]
b = [6, 4]
bounds = [(None, None), (-3, None)]
res = optimize.linprog(C, A, b, bounds=bounds)
print(res)
     con: array([], dtype=float64)
     fun: -21.99999984082492
 message: 'Optimization terminated successfully.'
     nit: 6
   slack: array([3.89999997e+01, 8.46872652e-08])
  status: 0
 success: True
       x: array([ 9.99999989, -2.99999999])
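
The signature quoted in the comments above also lists an integrality parameter. As a minimal sketch (assuming SciPy >= 1.9, where the default 'highs' method supports it; the toy problem below is made up for illustration), the same call can solve a small integer program:

from scipy import optimize

# Toy integer program: maximize x1 + 2*x2 s.t. x1 + x2 <= 5, x1, x2 >= 0 and integer
c = [-1, -2]   # negate to maximize
A = [[1, 1]]
b = [5]
res = optimize.linprog(c, A, b, bounds=[(0, None), (0, None)],
                       integrality=[1, 1])  # 1 = integer variable, 0 = continuous
print(res.x, res.fun)  # expect integer-valued x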

Nonlinear Programming

# coding=utf-8
from scipy.optimize import minimize
import numpy as np


# scipy.optimize.minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(),
# tol=None, callback=None, options=None)

# fun: the objective function to minimize
# x0: initial guess for the variables; with several variables, each needs an initial guess
# args: constants, i.e. the fixed parameters appearing in fun
# method: str or callable, the solver to use (if not given, one of BFGS, L-BFGS-B, SLSQP is chosen
#         depending on whether bounds/constraints are present)
# constraints: constraints imposed on the variables of fun

# demo 1
# compute the minimum of 1/x + x
def fun(args):
    a = args
    v = lambda x: a / x[0] + x[0]
    return v


if __name__ == "__main__":
    args = (1)  # a (note: (1) is just the scalar 1, not a tuple)
    x0 = np.asarray((2))  # initial guess
    res = minimize(fun(args), x0, method='SLSQP')
    print(res)
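
For reference, the analytic minimum of 1/x + x for x > 0 is 2, attained at x = 1, so res.x should come out close to 1 and res.fun close to 2.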

Compute the minimum of (2 + x1)/(1 + x2) - 3*x1 + 4*x3, where x1, x2, x3 all lie in the interval [0.1, 0.9].

def fun(args):
    a, b, c, d = args
    v = lambda x: (a + x[0]) / (b + x[1]) - c * x[0] + d * x[2]
    return v

def con(args):
    # Constraints come in two types: 'eq' and 'ineq'.
    # 'eq' means the function must equal 0; 'ineq' means the expression must be >= 0.
    x1min, x1max, x2min, x2max, x3min, x3max = args
    cons = ({'type': 'ineq', 'fun': lambda x: x[0] - x1min},
            {'type': 'ineq', 'fun': lambda x: -x[0] + x1max},
            {'type': 'ineq', 'fun': lambda x: x[1] - x2min},
            {'type': 'ineq', 'fun': lambda x: -x[1] + x2max},
            {'type': 'ineq', 'fun': lambda x: x[2] - x3min},
            {'type': 'ineq', 'fun': lambda x: -x[2] + x3max})
    return cons

# constants
args = (2, 1, 3, 4)  # a, b, c, d

# variable ranges / constraints
args1 = (0.1, 0.9, 0.1, 0.9, 0.1, 0.9)  # x1min, x1max, x2min, x2max, x3min, x3max
cons = con(args1)

# initial guess
x0 = np.asarray((0.5, 0.5, 0.5))

res = minimize(fun(args), x0, method='SLSQP', constraints=cons)
print(res)
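
Box constraints like the six 'ineq' dictionaries above can also be passed through minimize's bounds parameter directly; a minimal equivalent sketch (reusing fun, args, and x0 from above):

bounds = [(0.1, 0.9)] * 3  # one (lower, upper) pair per variable
res = minimize(fun(args), x0, method='SLSQP', bounds=bounds)
print(res)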

# objective function
def fun(a, b, c, d):
    def v(x):
        return np.log2(1 + x[0] * a / b) + np.log2(1 + x[1] * c / d)
    return v

# constraint function
def con(a, b, i):
    def v(x):
        return np.log2(1 + x[i] * a / b) - 5
    return v

# constants
args = [2, 1, 3, 4]  # a, b, c, d
args1 = [2, 5, 6, 4]

# initial guess
x0 = np.asarray((0.5, 0.5))

# constraints
cons = ({'type': 'ineq', 'fun': con(args1[0], args1[1], 0)},
        {'type': 'ineq', 'fun': con(args1[2], args1[3], 1)},
        )

res = minimize(fun(args[0], args[1], args[2], args[3]), x0, constraints=cons)
print(res)
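
The examples above only use 'ineq' constraints; for completeness, a minimal sketch of the 'eq' type (a toy problem chosen for illustration: minimize x1^2 + x2^2 subject to x1 + x2 = 1):

from scipy.optimize import minimize
import numpy as np

cons_eq = ({'type': 'eq', 'fun': lambda x: x[0] + x[1] - 1},)
res = minimize(lambda x: x[0] ** 2 + x[1] ** 2, np.asarray((0.0, 0.0)),
               method='SLSQP', constraints=cons_eq)
print(res.x)  # expect roughly [0.5, 0.5]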

Analytic Hierarchy Process (AHP)

https://blog.csdn.net/weixin_43819566/article/details/112251317
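
The linked post walks through the method in detail. As a minimal sketch of the core computation (weights from the principal eigenvector of a pairwise-comparison matrix, plus the consistency ratio CR = CI / RI; the judgment matrix below is made up for illustration):

import numpy as np

# Hypothetical 3x3 pairwise-comparison (judgment) matrix
A = np.array([[1,   2,   5],
              [1/2, 1,   3],
              [1/5, 1/3, 1]])

n = A.shape[0]
eigvals, eigvecs = np.linalg.eig(A)
k = np.argmax(eigvals.real)           # index of the principal eigenvalue
w = eigvecs[:, k].real
w = w / w.sum()                       # normalized weights

CI = (eigvals[k].real - n) / (n - 1)  # consistency index
RI = {3: 0.58, 4: 0.90, 5: 1.12, 6: 1.24, 7: 1.32}[n]  # random index table (Saaty)
CR = CI / RI                          # consistency ratio; CR < 0.1 is acceptable
print(w, CR)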

TOPSIS (Technique for Order Preference by Similarity to Ideal Solution)

Step 1: Convert every indicator to the positively oriented (larger-is-better) type

Step 2: Normalize the indicators

Step 3: Compute the scores and normalize them

Code implementation: https://blog.csdn.net/weixin_52300428/article/details/126309794

# coding=utf-8
# import the analysis libraries
import pandas as pd
import numpy as np


# smaller-is-better indicator -> larger-is-better indicator
def Mintomax(datas):
    return np.max(datas) - datas


# middle-is-best indicator -> larger-is-better indicator
def Midtomax(datas, x_best):
    temp_datas = datas - x_best
    M = np.max(abs(temp_datas))
    answer_datas = 1 - abs(datas - x_best) / M
    return answer_datas


# interval-type indicator -> larger-is-better indicator
def Intertomax(datas, x_min, x_max):
    M = max(x_min - np.min(datas), np.max(datas) - x_max)
    answer_list = []
    for i in datas:
        if i < x_min:
            answer_list.append(1 - (x_min - i) / M)
        elif i > x_max:
            answer_list.append(1 - (i - x_max) / M)
        else:
            answer_list.append(1)
    return np.array(answer_list)


# standardize the positively oriented matrix (remove the influence of units)
def Standard(datas):
    k = np.power(np.sum(pow(datas, 2), axis=0), 0.5)
    for i in range(len(k)):
        datas[:, i] = datas[:, i] / k[i]
    return datas


# compute the scores and normalize them
def Score(sta_data):
    z_max = np.amax(sta_data, axis=0)
    z_min = np.amin(sta_data, axis=0)
    # distance of each sample from the ideal best / ideal worst point
    tmpmaxdist = np.power(np.sum(np.power((z_max - sta_data), 2), axis=1), 0.5)
    tmpmindist = np.power(np.sum(np.power((z_min - sta_data), 2), axis=1), 0.5)
    score = tmpmindist / (tmpmindist + tmpmaxdist)
    score = score / np.sum(score)  # normalize
    return score


if __name__ == '__main__':
    # load the data
    df = pd.read_excel(r'./20条河流的水质情况数据.xlsx')

    # positive orientation
    df['细菌总数(个/mL)'] = Mintomax(df['细菌总数(个/mL)'])  # smaller-is-better -> larger-is-better
    df['PH值'] = Midtomax(df['PH值'], 7)  # middle-is-best -> larger-is-better; 7 is the optimum
    df['植物性营养物量(ppm)'] = Intertomax(df['植物性营养物量(ppm)'], 10, 20)  # interval type; [10, 20] is the ideal interval

    label_need = df.keys()[1:]
    # data values with the name column stripped; cast to float so the in-place division in Standard() works
    data = df[label_need].values.astype(float)
    sta_data = Standard(data)  # standardize the positively oriented matrix (remove the influence of units)

    sco = Score(sta_data)  # compute the scores
    # attach the computed scores to the source data as a new DataFrame column
    df['score'] = sco

    df.to_csv('Topsis.csv', index=False, encoding='utf_8_sig')
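
To turn the scores into an actual ranking, a short follow-up on the 'score' column the script just wrote is enough (a sketch, using the same df as above):

    df['rank'] = df['score'].rank(ascending=False)  # 1 = best river
    print(df.sort_values('score', ascending=False))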

Classification

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC, SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris

# standardization (zero mean, unit variance)
# scaler = StandardScaler()
# scaler.fit(X)
# X_stand = scaler.transform(X)

# load the iris dataset
iris_X, iris_y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)

# logistic regression
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)  # accuracy_score expects (y_true, y_pred)

# linear SVM
model2 = LinearSVC()
# SVM with a kernel
# poly: close to linear; rbf: more nonlinear; sigmoid: nonlinear
Kernel = ["linear", "poly", "rbf", "sigmoid"]
model3 = SVC(kernel="rbf")

Regression

import numpy as np
from sklearn.linear_model import LinearRegression, Lasso, Ridge
import matplotlib.pyplot as plt

data = np.loadtxt("data.csv", delimiter=",")

# features: every column except the last
x_data = data[:, 0:-1]
# labels: the last column
y_data = data[:, -1]

# train the model
# create the linear regression model object
model = LinearRegression()
# fit the model
model.fit(x_data, y_data)
# build a test sample (this assumes data.csv has two feature columns)
x_test = [[102, 4]]
# predict
predict = model.predict(x_test)


# Lasso regression (L1 regularization)
model2 = Lasso(alpha=0.1)

# ridge regression (L2 regularization)
model3 = Ridge(alpha=0.1)
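
After fitting, the learned model can be inspected directly; a short sketch using standard LinearRegression attributes (R^2 is computed on the training data here only because the snippet loads no separate test set):

print(model.coef_)                   # one weight per feature column
print(model.intercept_)              # bias term
print(model.score(x_data, y_data))   # R^2 on the training data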
