需要混沌系统的李雅普诺夫指数、K熵、样本熵:给出N维离散混沌映射李雅普诺夫指数、K熵、样本熵图像的python代码,速采纳解题提供酬金,感谢
方案一:
以下是一个计算混沌系统的李雅普诺夫指数、K熵和样本熵的Python代码,可以适用于N维离散混沌映射。
import numpy as np
import matplotlib.pyplot as plt
def lyapunov_exponent(x0, f, num_iter):
    """Estimate the mean Lyapunov exponent of an N-dimensional discrete map.

    The Jacobian at each orbit point is approximated by forward finite
    differences, and the tangent-space basis is re-orthonormalized with a
    QR decomposition at every step, accumulating the log of the diagonal
    of R.  (The original code multiplied raw Jacobians for ``num_iter``
    steps — which overflows for any chaotic map — and never divided by
    the number of iterations, so its result was not an exponent rate.)

    Parameters
    ----------
    x0 : sequence of float
        Initial state of the map.
    f : callable
        The map; takes and returns an array-like of the same dimension.
    num_iter : int
        Number of iterations to average over.

    Returns
    -------
    float
        Mean of the Lyapunov spectrum (for a 1-D map, the Lyapunov
        exponent itself; ln 2 for the logistic map at r = 4).
    """
    n = len(x0)
    eps = 1e-6                      # finite-difference step
    x = np.array(x0, dtype=float)
    q = np.eye(n)                   # orthonormal tangent basis
    log_sums = np.zeros(n)
    for _ in range(num_iter):
        fx = np.asarray(f(x), dtype=float)
        jac = np.zeros((n, n))
        for j in range(n):
            x_plus = np.array(x)
            x_plus[j] += eps
            jac[:, j] = (np.asarray(f(x_plus), dtype=float) - fx) / eps
        # Propagate the basis and re-orthonormalize; |diag(R)| holds the
        # local stretching factors along each basis direction.
        q, r_mat = np.linalg.qr(jac @ q)
        log_sums += np.log(np.abs(np.diag(r_mat)) + 1e-300)
        x = fx
    return float(np.mean(log_sums / num_iter))
def k_entropy(x, k):
    """Order-k block entropy estimate of a time series, in bits.

    The series is symbolized with a binary partition at its mean, then
    the Shannon entropy of the empirical distribution of length-``k``
    symbol blocks is returned.

    The original implementation indexed ``x[j + l]`` for ``l`` up to
    ``i + k - 1``, which raises ``IndexError`` for every ``k >= 2`` (it
    is called with ``k=2`` below), and its "probabilities" were products
    of raw samples, which do not form a distribution; this is a standard
    pattern-counting estimator instead.

    Parameters
    ----------
    x : array-like of float
        Time series.
    k : int
        Block (pattern) length.

    Returns
    -------
    float
        Shannon entropy of the k-block distribution, in bits.
    """
    x = np.asarray(x, dtype=float)
    n = len(x)
    if n < k:
        return 0.0
    # Binary symbolization: 1 where the sample is >= the series mean.
    symbols = (x >= x.mean()).astype(int)
    counts = {}
    for j in range(n - k + 1):
        pattern = tuple(symbols[j:j + k])
        counts[pattern] = counts.get(pattern, 0) + 1
    probs = np.array(list(counts.values()), dtype=float) / (n - k + 1)
    return float(-np.sum(probs * np.log2(probs)))
def sample_entropy(x, m, r):
    """Sample entropy (SampEn) of a time series.

    SampEn(m, r) = -ln(A / B), where B is the number of template pairs
    of length ``m`` within Chebyshev distance ``r`` of each other and A
    is the number of such pairs of length ``m + 1``; self-matches are
    excluded.  (The original counted every entry of a single-length
    distance matrix, including the all-zero diagonal of self-matches,
    which is not sample entropy.)

    Parameters
    ----------
    x : array-like of float
        Time series.
    m : int
        Template length.
    r : float
        Matching tolerance (max-norm radius).

    Returns
    -------
    float
        The sample entropy; ``inf`` when no pair matches at either length.
    """
    x = np.asarray(x, dtype=float)
    n = len(x)

    def _count_matches(length):
        # Pairs i < j of templates of the given length within tolerance r.
        # Both lengths use the same n - m starting points so the counts
        # are comparable, per the standard SampEn definition.
        num = n - m
        templates = np.array([x[i:i + length] for i in range(num)])
        matches = 0
        for i in range(num - 1):
            dist = np.max(np.abs(templates[i + 1:] - templates[i]), axis=1)
            matches += int(np.sum(dist <= r))
        return matches

    b_count = _count_matches(m)
    a_count = _count_matches(m + 1)
    if b_count == 0 or a_count == 0:
        return float("inf")
    return float(-np.log(a_count / b_count))
def logistic_map(x, r=4):
    """One step of the logistic map: x -> r * x * (1 - x)."""
    one_minus = 1 - x
    return r * x * one_minus
def henon_map(x, y, a=1.4, b=0.3):
    """One step of the Henon map: (x, y) -> (1 - a*x^2 + y, b*x)."""
    next_x = 1 - a * x ** 2 + y
    next_y = b * x
    return np.array([next_x, next_y])
if __name__ == '__main__':
    # Example: Lyapunov exponent, K entropy and sample entropy of the
    # logistic map.
    x0 = [0.1]
    r = 4
    f = lambda x: logistic_map(x, r)

    lyap_exp = lyapunov_exponent(x0, f, num_iter=1000)

    # Iterate the map to build a 5000-sample trajectory.
    n_samples = 5000
    x = np.zeros(n_samples)
    x[0] = x0[0]
    for i in range(1, n_samples):
        x[i] = f(x[i - 1])

    k_ent = k_entropy(x, k=2)
    samp_ent = sample_entropy(x, m=2, r=0.2)

    print(f"Lyapunov exponent: {lyap_exp}")
    print(f"K entropy: {k_ent}")
    print(f"Sample entropy: {samp_ent}")

    plt.plot(x)
    plt.title(f"Logistic Map (r={r})")
    plt.xlabel("Iteration")
    plt.ylabel("x")
    plt.show()
你可以使用这个代码,将其中的 logistic_map 或 henon_map 替换为你自己的混沌系统的离散映射函数 f 即可。
方案二:
以下是一个基于Python的程序,用于计算N维混沌映射的Lyapunov指数、K熵和样本熵,并生成相应的图像。
import numpy as np
import matplotlib.pyplot as plt
def logistic_map(r, x):
    """One iteration of the logistic map with growth parameter r."""
    complement = 1 - x
    return r * x * complement
def lyapunov_exponent(r, x0, n):
    """Lyapunov exponent of the logistic map via the analytic derivative.

    Averages log|f'(x)| = log|r * (1 - 2x)| along an orbit of length
    ``n`` started at ``x0``.  Fixes: the accumulator in the original
    shadowed the builtin ``sum``; the map is evaluated inline since its
    derivative is hard-coded anyway, making the function self-contained.

    Parameters
    ----------
    r : float
        Logistic-map parameter.
    x0 : float
        Initial condition in (0, 1).
    n : int
        Number of iterations to average over.

    Returns
    -------
    float
        Estimated Lyapunov exponent (ln 2 for r = 4).
    """
    x = x0
    log_sum = 0.0
    for _ in range(n):
        derivative = r * (1 - 2 * x)      # f'(x) for f(x) = r x (1 - x)
        log_sum += np.log(abs(derivative))
        x = r * x * (1 - x)
    return log_sum / n
def entropy_k(r, x0, n, k):
    """Shannon entropy (nats) of the orbit's occupancy of k uniform bins.

    Iterates the logistic map ``n`` times from ``x0``, histograms the
    iterates into ``k`` equal bins on [0, 1], and returns the Shannon
    entropy of the bin frequencies.

    Fix: the original computed ``index = int(fx * k)`` with no bound
    check, which raises ``IndexError`` when an iterate reaches exactly
    1.0 (e.g. x = 0.5 with r = 4); the index is now clamped to
    ``k - 1``, matching the guard already present in ``entropy_sample``.

    Parameters
    ----------
    r : float
        Logistic-map parameter.
    x0 : float
        Initial condition.
    n : int
        Number of iterates to histogram.
    k : int
        Number of bins.

    Returns
    -------
    float
        Entropy of the occupancy distribution, in nats.
    """
    x = x0
    freq = np.zeros(k)
    for _ in range(n):
        x = r * x * (1 - x)              # map inlined; same orbit as before
        index = min(int(x * k), k - 1)   # clamp: x == 1.0 would index k
        freq[index] += 1
    probs = freq[freq > 0] / n
    return -np.sum(probs * np.log(probs))
def entropy_sample(r, x0, n, m):
    """Bin-occupancy Shannon entropy (nats) of a logistic-map orbit.

    NOTE(review): despite its name this is a histogram entropy over
    ``m`` uniform bins on [0, 1], not the template-matching SampEn
    statistic; it matches ``entropy_k`` apart from the explicit bound
    guard below.  Preserved as-is.
    """
    bin_counts = np.zeros(m)
    state = x0
    for _ in range(n):
        state = logistic_map(r, state)
        bin_index = int(np.floor(m * state))
        if bin_index == m:           # state == 1.0 lands exactly on the edge
            bin_index = m - 1
        bin_counts[bin_index] += 1
    occupied = bin_counts[np.nonzero(bin_counts)]
    probabilities = occupied / n
    return -np.sum(probabilities * np.log(probabilities))
def main():
    """Run the logistic-map demo: print the statistics, plot the orbit."""
    # Experiment parameters.
    r = 4
    x0 = 0.1
    n = 10000
    m = 100

    lyapunov = lyapunov_exponent(r, x0, n)
    entropyk = entropy_k(r, x0, n, m)
    entropys = entropy_sample(r, x0, n, m)

    print("Lyapunov exponent:", lyapunov)
    print("K entropy:", entropyk)
    print("Sample entropy:", entropys)

    # Plot the orbit itself.
    orbit = np.zeros(n)
    orbit[0] = x0
    for step in range(1, n):
        orbit[step] = logistic_map(r, orbit[step - 1])
    plt.plot(orbit)
    plt.show()
if __name__ == "__main__":
    main()
在这个程序中,我们使用了一个简单的混沌映射函数logistic_map。通过调整参数r,可以改变混沌映射的行为。我们使用Lyapunov指数、K熵和样本熵来描述混沌映射的性质,并将其可视化为图像。
程序输出了计算出的Lyapunov指数、K熵和样本熵的值,并显示了混沌映射的图像。如果您希望生成其他维数或不同形式的混沌映射的相应图像,您可以修改程序中的logistic_map函数。
引用chatGPT作答,下面是使用Python生成二次映射的K熵和样本熵图像的代码:
import numpy as np
import matplotlib.pyplot as plt
def quadratic_map(x, r):
    """Quadratic (logistic-family) map: x -> r * x * (1 - x)."""
    remainder = 1 - x
    return r * x * remainder
def k_entropy(data, k):
    """Shannon entropy (bits) of the distribution of length-k patterns.

    Slides a window of length ``k`` over ``data`` (the final window is
    excluded, matching the original ``len(data) - k`` bound), counts
    each distinct pattern, and returns the Shannon entropy of the
    empirical pattern distribution.  Uses ``collections.Counter`` in
    place of the hand-rolled dict-counting loop; results are unchanged.

    Parameters
    ----------
    data : sequence
        Time series (floats or ints).
    k : int
        Pattern length.

    Returns
    -------
    float
        Pattern entropy in bits.
    """
    from collections import Counter
    patterns = Counter(tuple(data[i:i + k]) for i in range(len(data) - k))
    counts = np.array(list(patterns.values()), dtype=float)
    probs = counts / counts.sum()
    return -np.sum(probs * np.log2(probs))
def sample_entropy(data, k):
    """Exponential-kernel similarity entropy of length-k windows.

    NOTE(review): despite the name this is not the standard SampEn
    statistic; it returns -log of the mean of exp(-r * d_ij), where
    d_ij is the Chebyshev distance between windows i and j (d_ii = 0,
    so the diagonal contributes exp(0) = 1, exactly as in the original).

    Performance fix: the original built the full (m-k+1)^2 distance
    matrix with nested Python loops; this version computes one row of
    distances at a time with NumPy broadcasting, producing identical
    values in O(n*k) memory and far less time.

    Parameters
    ----------
    data : sequence of float
        Time series.
    k : int
        Window length.

    Returns
    -------
    float
        -log( sum_ij exp(-r * d_ij) / (m-k+1)^2 ) with r fixed at 0.2.
    """
    r = 0.2                              # tolerance scale, as in the original
    m = len(data)
    num_windows = m - k + 1
    windows = np.asarray([data[i:i + k] for i in range(num_windows)],
                         dtype=float)
    total = 0.0
    for i in range(num_windows):
        # Chebyshev distance from window i to every window (0 to itself).
        dist = np.max(np.abs(windows - windows[i]), axis=1)
        total += np.sum(np.exp(-r * dist))
    return -np.log(total / num_windows ** 2)
# generate data
x = 0.1
r = 3.8
n = 10000
data = np.zeros(n)
for step in range(n):
    data[step] = x
    x = quadratic_map(x, r)

# calculate k entropy and sample entropy
ks = range(1, 10)
k_entropies = [k_entropy(data, k) for k in ks]
sample_entropies = [sample_entropy(data, k) for k in ks]

# plot k entropy and sample entropy
plt.plot(ks, k_entropies, label="K entropy")
plt.plot(ks, sample_entropies, label="Sample entropy")
plt.xlabel("k")
plt.ylabel("Entropy")
plt.legend()
plt.show()
这段代码首先定义了一个 quadratic_map 函数,用于实现二次映射。然后,使用 k_entropy 函数和 sample_entropy 函数分别计算K熵和样本熵。在本例中,使用r = 0.2进行样本熵计算。最后,通过循环计算1到9个周期的K熵和样本熵,并使用 matplotlib 库在同一个图中绘制了两者的折线图。
import numpy as np
import matplotlib.pyplot as plt
# The quadratic (logistic-family) map.
def quadratic(x, c):
    """Return one iterate of the quadratic map x -> c * x * (1 - x)."""
    decay = 1 - x
    return c * x * decay
# K-entropy: Shannon entropy (nats) of length-k subsequence patterns.
def calc_ks_entropy(x, c, k=1):
    """Entropy of the empirical distribution of length-k patterns of x.

    Parameters
    ----------
    x : sequence
        Time series.
    c : float
        Map parameter; unused here, kept for interface compatibility.
    k : int, optional
        Pattern length (default 1).

    Returns
    -------
    float
        Pattern entropy in nats.

    Uses ``collections.Counter`` in place of the original hand-rolled
    dict-counting loop; probabilities and result are unchanged.
    """
    from collections import Counter
    n_total = len(x) - k
    counts = Counter(tuple(x[i:i + k]) for i in range(n_total))
    entropy = 0.0
    for count in counts.values():
        p = count / n_total
        entropy -= p * np.log(p)
    return entropy
# "Sample entropy" variant: averages -log(p) over the distinct patterns.
def calc_sample_entropy(x, c, k=1):
    """Average negative log-probability of the distinct length-k patterns.

    NOTE(review): this sums -log(p) once per *distinct* pattern and then
    divides by the number of windows, which is not the standard SampEn
    definition -- preserved as-is.  ``c`` is unused but kept for
    interface compatibility.
    """
    n_total = len(x) - k
    pattern_counts = {}
    for start in range(n_total):
        window = tuple(x[start:start + k])
        pattern_counts[window] = pattern_counts.get(window, 0) + 1
    neg_log_sum = 0.0
    for occurrences in pattern_counts.values():
        neg_log_sum -= np.log(occurrences / n_total)
    return neg_log_sum / n_total
# Parameter range for the sweep.
c_min = 0
c_max = 2
n_c = 200
n_sample = 10000

# Compute K entropy and sample entropy across the parameter range.
cs = np.linspace(c_min, c_max, n_c)
ks_entropies = []
sample_entropies = []
for c in cs:
    orbit = np.zeros(n_sample)
    orbit[0] = 0.5
    for j in range(1, n_sample):
        orbit[j] = quadratic(orbit[j - 1], c)
    ks_entropies.append(calc_ks_entropy(orbit, c))
    sample_entropies.append(calc_sample_entropy(orbit, c))

# Plot entropy versus parameter, one panel per statistic.
fig, axs = plt.subplots(2, 1, figsize=(8, 8))
axs[0].plot(cs, ks_entropies)
axs[0].set_title('K-Entropy')
axs[0].set_xlabel('c')
axs[0].set_ylabel('Entropy')
axs[1].plot(cs, sample_entropies)
axs[1].set_title('Sample Entropy')
axs[1].set_xlabel('c')
axs[1].set_ylabel('Entropy')
plt.tight_layout()
plt.show()
$105%100#100%,X^ZnlinuX~“wall~zZ^文贤usb·.·05:15:usb~Zz^free<<<{;{@{¥{c{m{d/{5}0}/}c}m}d}/}~Zzn^linuX~测CSDN试>”2/5ttkts5/2ttkts2/5print¥55,5/5ttkts5/2ttkts2/5print55;13453636000
print("hello world")
该回答引用ChatGPT
以下是一个简单的Python代码,用于计算N维离散混沌映射的K熵和样本熵,并生成相应的图像。
import numpy as np
import matplotlib.pyplot as plt
def logistic_map(x, r):
    """Logistic map iterate: r * x * (1 - x)."""
    residual = 1 - x
    return r * x * residual
def chaotic_map(x, y, r):
    """Advance two uncoupled logistic maps one step; returns (x', y')."""
    return logistic_map(x, r), logistic_map(y, r)
def k_entropy(data, k):
    """Shannon entropy (bits) of the transition matrix of a symbol series.

    Counts transitions between consecutive symbols of ``data`` in a
    k x k matrix and returns the entropy of the joint transition
    frequencies.

    Fixes: the original indexed ``counts[x, y]`` directly with the data
    values, which raises ``IndexError`` for float symbols (the caller
    passes ``np.floor(x * k)`` floats) and for a symbol equal to ``k``;
    values are now cast to int and clamped into [0, k-1].

    Parameters
    ----------
    data : sequence
        Symbol series; values are truncated to ints in [0, k-1].
    k : int
        Number of distinct symbols.

    Returns
    -------
    float
        Entropy of the transition distribution, in bits.
    """
    n = len(data)
    counts = np.zeros((k, k))
    for i in range(n - 1):
        a = min(int(data[i]), k - 1)       # cast + clamp float/edge symbols
        b = min(int(data[i + 1]), k - 1)
        counts[a, b] += 1
    p = counts / np.sum(counts)
    p[p == 0] = 1                          # log(1) = 0: empty cells drop out
    return -np.sum(p * np.log2(p))
def sample_entropy(data, m, r):
    """-log of the fraction of window pairs (i < j) within tolerance r.

    NOTE(review): this is only a rough single-length variant of SampEn --
    it never compares length m against m + 1 templates.  Preserved as-is.
    ``data`` must support array arithmetic on slices (a NumPy array).
    """
    n = len(data)
    match_counts = np.zeros(n - m)
    for i in range(n - m):
        template = data[i:i + m]
        for j in range(i + 1, n - m):
            gap = np.max(np.abs(template - data[j:j + m]))
            if gap <= r:
                match_counts[i] += 1
    return -np.log(np.sum(match_counts) / (n - m))
def plot_entropy(r_values, k_entropy_values, sample_entropy_values):
    """Plot both entropy curves against the map parameter r."""
    for values, name in ((k_entropy_values, 'K entropy'),
                         (sample_entropy_values, 'Sample entropy')):
        plt.plot(r_values, values, label=name)
    plt.xlabel('r')
    plt.ylabel('Entropy')
    plt.legend()
    plt.show()
# Parameters
N = 2
k = 10
m = 2
r = 0.1

# Generate data
x = np.random.rand(N)
y = np.random.rand(N)
r_values = np.linspace(2.5, 4.0, 1000)
k_entropy_values = np.zeros_like(r_values)
sample_entropy_values = np.zeros_like(r_values)

# Compute entropy values
for i, r in enumerate(r_values):
    # Iterate the pair of maps, then symbolize the final states.
    # NOTE(review): x and y are length-N arrays here, so k_entropy
    # receives a 2-row array -- preserved exactly as the original wrote it.
    for _ in range(1000):
        x, y = chaotic_map(x, y, r)
    data = np.array([np.floor(x * k), np.floor(y * k)])
    k_entropy_values[i] = k_entropy(data, k)
    sample_entropy_values[i] = sample_entropy(data[0], m, r)

# Plot results
plot_entropy(r_values, k_entropy_values, sample_entropy_values)
在这个代码中,我们使用了一个二维的离散混沌映射,即每个维度都是一个逻辑映射。我们计算了K熵和样本熵,并使用Matplotlib库生成了相应的图像。您可以根据需要调整参数,例如维度N、符号数k、滑动窗口大小m和阈值r。