import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA


def are_neighbors(grid2d, i, j):
    """
    Chebyshev distance = 1 (8-neighborhood).
    """
    di = abs(grid2d[i, 0] - grid2d[j, 0])
    dj = abs(grid2d[i, 1] - grid2d[j, 1])
    return max(di, dj) == 1  # True if the two units are adjacent on the grid
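
# A quick illustration of the 8-neighborhood test above (a sketch, assuming a 2x2
# grid laid out row by row, the same way som_iris_ex4 builds grid2d):
#   grid2d = np.array([[1, 1], [2, 1], [1, 2], [2, 2]])
#   are_neighbors(grid2d, 0, 1)  # True  -> horizontally adjacent
#   are_neighbors(grid2d, 0, 3)  # True  -> diagonally adjacent (Chebyshev distance 1)
#   are_neighbors(grid2d, 0, 0)  # False -> same unit, distance 0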


def movmean(data, window):
    """
    Centered moving average.
    """
    result = np.zeros_like(data)
    half = window // 2
    for i in range(len(data)):
        start = max(0, i - half)
        end = min(len(data), i + half + 1)
        result[i] = np.mean(data[start:end])
    return result
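
# A small sketch of the smoothing behaviour: the window is centered and simply
# shrinks at the edges, so no padding is needed (illustrative values, window=3):
#   movmean(np.array([0., 1., 0., 1., 0.]), 3)
#   -> array([0.5, 0.3333..., 0.6667..., 0.3333..., 0.5])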


def som_iris_ex4():
    rng = np.random.default_rng(7)
    R, C = 12, 12                 # larger grid
    M = R * C
    iters = 4000                  # more iterations for larger grids
    eta0 = 0.12
    sigma0 = max(R, C) / 2        # initial neighborhood radius (global ordering phase)
    sigma_final_list = [1, 2, 3]  # final neighborhood sizes to test

    iris = load_iris()
    X = iris.data
    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)   # 150x4, each feature scaled to [0, 1]
    S = X.shape[0]

    print(f"loaded iris: {S} samples, {X.shape[1]} features")
    print(f"grid: {R}x{C} = {M} neurons")
    print(f"testing sigma_final: {sigma_final_list}")

    # 1-based (column, row) coordinates of each neuron on the map lattice
    gx, gy = np.meshgrid(np.arange(1, C + 1), np.arange(1, R + 1))
    grid2d = np.column_stack([gx.ravel(), gy.ravel()])

    fig1 = plt.figure(figsize=(10, 6), facecolor='white')
    ax1 = fig1.add_subplot(111)
    ax1.set_xlabel('Iteration')
    ax1.set_ylabel('Topographic Error')
    ax1.set_title(f'R={R}, C={C}: effect of final σ in {{1,2,3}}')
    ax1.grid(True)
    ax1.set_frame_on(True)  # keep the axes frame (MATLAB-style "box on")

    results = []
    for k, sigma_final in enumerate(sigma_final_list):
        print(f'\n=== Testing sigma_final = {sigma_final} ===')
        W = rng.random((M, X.shape[1]))  # weights in [0, 1], same scale as X
        topoErr = np.zeros(iters)
        tau = iters  # a single time constant for the whole run

        for t in range(1, iters + 1):
            # exponential decay of the neighborhood radius and learning rate
            sigma_t = sigma_final + (sigma0 - sigma_final) * np.exp(-t / tau)
            eta_t = eta0 * np.exp(-t / tau)

            # pick a random sample and find the two best-matching units
            x = X[rng.integers(S), :]
            dists = np.sum((W - x) ** 2, axis=1)
            idx = np.argsort(dists)
            bmu1 = idx[0]
            bmu2 = idx[1]

            # Gaussian neighborhood around the BMU on the grid, then weight update
            dgrid2 = np.sum((grid2d - grid2d[bmu1, :]) ** 2, axis=1)
            h = np.exp(-dgrid2 / (2 * sigma_t ** 2))
            h = h / np.max(h)
            W = W + eta_t * (h[:, np.newaxis] * (x - W))

            # topographic error: 1 if the two best units are not adjacent on the grid
            topoErr[t - 1] = 0 if are_neighbors(grid2d, bmu1, bmu2) else 1

            if t % 1000 == 0:
                print(f"  iteration {t}/{iters}, sigma={sigma_t:.3f}, eta={eta_t:.5f}")

        win = 50
        topoErr_s = movmean(topoErr, win)
        results.append({
            'sigma_final': sigma_final,
            'W': W.copy(),
            'topoErr': topoErr.copy(),
            'topoErr_s': topoErr_s.copy()
        })

        # plot the smoothed TE curve for this sigma_final
        ax1.plot(range(1, iters + 1), topoErr_s,
                 label=f'σ_final={sigma_final}', linewidth=1.5)

    ax1.legend(loc='upper right')
    plt.tight_layout()
    plt.show()

    # project each final weight lattice to 2-D with PCA and draw the grid lines
    fig2 = plt.figure(figsize=(15, 5), facecolor='white')
    for k, result in enumerate(results):
        W = result['W']
        pca = PCA(n_components=2)
        W2 = pca.fit_transform(W)

        ax = fig2.add_subplot(1, len(sigma_final_list), k + 1)
        ax.set_aspect('equal')
        ax.set_title(f'σ_final={result["sigma_final"]}')
        ax.set_frame_on(True)  # keep the axes frame (MATLAB-style "box on")

        WX = W2[:, 0].reshape((R, C))
        WY = W2[:, 1].reshape((R, C))
        # horizontal grid lines
        for r in range(R):
            ax.plot(WX[r, :], WY[r, :], '-', linewidth=1.0, color='blue', alpha=0.6)
        # vertical grid lines
        for c in range(C):
            ax.plot(WX[:, c], WY[:, c], '-', linewidth=1.0, color='blue', alpha=0.6)
        # small markers for the neurons
        ax.plot(W2[:, 0], W2[:, 1], 'ko', markersize=2, markerfacecolor='k')

    plt.suptitle('Final Lattices Projected by PCA', fontsize=14)
    plt.tight_layout()
    plt.show()

    print('\n== Summary (lower TE is better) ==')
    for result in results:
        te_mean = np.mean(result['topoErr'][-500:])  # mean over the last 500 iterations
        print(f'sigma_final={result["sigma_final"]} -> mean TE (last 500 iters): {te_mean:.4f}')
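
    # Optional companion check (a sketch, not part of the original summary):
    # quantization error, i.e. the mean distance from each sample to its
    # best-matching unit, computed with the already imported cdist.
    print('\n== Quantization error (sketch) ==')
    for result in results:
        d = cdist(X, result['W'])               # sample-to-neuron distances
        qe = float(np.mean(np.min(d, axis=1)))  # mean distance to the BMU
        print(f'sigma_final={result["sigma_final"]} -> QE: {qe:.4f}')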


if __name__ == '__main__':
    som_iris_ex4()