Attachment 'sheet07.m'
function sheet07
% SHEET07 Kernel-based network anomaly detection with a one-class SVM.
% Loads the student data set, builds linear kernel matrices, trains a
% one-class SVM, scores the test points, and prints the indices of the
% predicted attacks.

% Assumes stud-data.mat provides Xtr (training features, one column per
% point) and Xts (test features) -- confirmed by the products below.
load('stud-data.mat')

% Compute the linear kernel matrices:
%   KR  = training x training, KS = test x test, KSR = test x training.
disp('computing kernel matrices...')
KR = full(Xtr'*Xtr);
KS = full(Xts'*Xts);
KSR = full(Xts'*Xtr);

% Learn the kernel expansion coefficients alpha of the one-class SVM.
disp('learning one-class-SVM...')
C = 1; % TODO: adjust the regularization constant C to the data set
alpha = oneclass(KR, C);

% Anomaly scores on the test data (normalized so 1 is the boundary).
as = compute_scores(KS, KSR, KR, alpha);

% Points scoring above 1 lie outside the learned region -> attacks.
Ap = (as > 1);

predicted_attacks = find(Ap)'
% ...
22
function [x,y] = pr_loqo2(c, H, A, b, l, u)
%[X,Y] = PR_LOQO2(c, H, A, b, l, u)
%
%loqo solves the quadratic programming problem
%
%minimize c' * x + 1/2 x' * H * x
%subject to A'*x = b
% l <= x <= u
%
% Dimensions: c : N-column vector
% H : NxN matrix
% A : N-row vector
% b : real number
% l : N-column vector
% u : N-column vector
%
% x : N-column vector (primal solution)
% y : dual variable of the equality constraint
%     NOTE(review): header originally said "Objective value", but y is
%     computed as the multiplier of A*x = b below -- confirm with callers.
%
%for a documentation see R. Vanderbei, LOQO: an Interior Point Code
% for Quadratic Programming

% Solver parameters: sigfig_max = required significant figures of
% agreement between primal and dual objective; counter_max = iteration
% cap; bound = initial magnitude for the slack variables.
% (margin is unused in this trimmed version.)
margin = 0.05; bound = 100; sigfig_max = 8; counter_max = 50;
[m, n] = size(A); H_x = H; H_diag = diag(H);
b_plus_1 = 1; c_plus_1 = norm(c) + 1;
% one_x / one_y are unused in this trimmed version.
one_x = -ones(n,1); one_y = -ones(m,1);
% Regularize the Hessian diagonal so the Cholesky factorization exists.
for i = 1:n H_x(i,i) = H_diag(i) + 1; end;
H_y = eye(m); c_x = c; c_y = 0;
% Starting point: solve the regularized KKT system for (x, y) via a
% Cholesky factorization of H_x and a Schur complement in y.
R = chol(H_x); H_Ac = R \ ([A; c_x'] / R)';
H_A = H_Ac(:,1:m); H_c = H_Ac(:,(m+1):(m+1));
A_H_A = A * H_A; A_H_c = A * H_c;
H_y_tmp = (A_H_A + H_y); y = H_y_tmp \ (c_y + A_H_c);
% Initialize primal x, bound slacks g, t and dual slacks z, s strictly
% positive; mu is the barrier (complementarity) parameter.
x = H_A * y - H_c; g = max(abs(x - l), bound);
z = max(abs(x), bound); t = max(abs(u - x), bound);
s = max(abs(x), bound); mu = (z' * g + s' * t)/(2 * n);
sigfig = 0; counter = 0; alfa = 1;
% Main predictor-corrector loop. The `*` of the two logicals acts as a
% logical AND: iterate until enough significant figures agree or the
% iteration cap is reached.
while ((sigfig < sigfig_max) * (counter < counter_max)),
counter = counter + 1; H_dot_x = H * x;
% Residuals: rho = equality constraint, nu/tau = lower/upper bound
% slacks, sigma = dual feasibility (stationarity) residual.
rho = - A * x + b; nu = l - x + g; tau = u - x - t;
sigma = c - A' * y - z + s + H_dot_x;
gamma_z = - z; gamma_s = - s;
x_dot_H_dot_x = x' * H_dot_x;
% Normalized infeasibility measures (computed for monitoring; unused).
primal_infeasibility = norm([tau; nu]) / b_plus_1;
dual_infeasibility = norm([sigma]) / c_plus_1;
% The stopping test counts matching significant figures of the
% primal/dual objective pair (duality gap).
primal_obj = c' * x + 0.5 * x_dot_H_dot_x;
dual_obj = - 0.5 * x_dot_H_dot_x + l' * z - u' * s + b'*y; %%%
old_sigfig = sigfig;
sigfig = max(-log10(abs(primal_obj - dual_obj)/(abs(primal_obj) + 1)), 0);
% --- Predictor step: affine-scaling direction ---------------------------
hat_nu = nu + g .* gamma_z ./ z; hat_tau = tau - t .* gamma_s ./ s;
d = z ./ g + s ./ t;
% Refresh the Hessian diagonal with the current scaling d and refactor.
for i = 1:n H_x(i,i) = H_diag(i) + d(i); end;
H_y = 0; c_x = sigma - z .* hat_nu ./ g - s .* hat_tau ./ t;
c_y = rho; R = chol(H_x); H_Ac = R \ ([A; c_x'] / R)';
H_A = H_Ac(:,1:m); H_c = H_Ac(:,(m+1):(m+1));
A_H_A = A * H_A; A_H_c = A * H_c; H_y_tmp = (A_H_A + H_y);
delta_y = H_y_tmp \ (c_y + A_H_c); delta_x = H_A * delta_y - H_c;
delta_s = s .* (delta_x - hat_tau) ./ t;
delta_z = z .* (hat_nu - delta_x) ./ g;
delta_g = g .* (gamma_z - delta_z) ./ z;
delta_t = t .* (gamma_s - delta_s) ./ s;
% --- Corrector step: re-target toward the central path using mu and the
% second-order terms from the predictor direction (same factorization R).
gamma_z = mu ./ g - z - delta_z .* delta_g ./ g;
gamma_s = mu ./ t - s - delta_s .* delta_t ./ t;
hat_nu = nu + g .* gamma_z ./ z;
hat_tau = tau - t .* gamma_s ./ s;
c_x = sigma - z .* hat_nu ./ g - s .* hat_tau ./ t;
c_y = rho; H_Ac = R \ ([A; c_x'] / R)';
H_A = H_Ac(:,1:m); H_c = H_Ac(:,(m+1):(m+1));
A_H_A = A * H_A; A_H_c = A * H_c;
H_y_tmp = (A_H_A + H_y); delta_y = H_y_tmp \ (c_y + A_H_c);
delta_x = H_A * delta_y - H_c; delta_s = s .* (delta_x - hat_tau) ./ t;
delta_z = z .* (hat_nu - delta_x) ./ g;
delta_g = g .* (gamma_z - delta_z) ./ z;
delta_t = t .* (gamma_s - delta_s) ./ s;
% Step length: 95% of the largest step keeping all slacks positive
% (the -1 entry caps alfa at 0.95 when no ratio is binding).
alfa = - 0.95 / min([delta_g ./ g; delta_t ./ t;
delta_z ./ z; delta_s ./ s; -1]);
% Shrink the barrier parameter; the factor approaches 0 for long steps.
mu = (z' * g + s' * t)/(2 * n);
mu = mu * ((alfa - 1) / (alfa + 10))^2;
% Take the step in all primal, dual, and slack variables.
x = x + delta_x * alfa; g = g + delta_g * alfa;
t = t + delta_t * alfa; y = y + delta_y * alfa;
z = z + delta_z * alfa; s = s + delta_s * alfa;
end
103
104 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
105 %
106 % Your solutions below!
107 %
108
109 % 3. Train a one-class SVM given the kernel matrix K and the
110 % regularization constant C.
function alpha = oneclass(K, C)
% ONECLASS trains a one-class SVM (SVDD) from the kernel matrix.
%   K     : n x n kernel matrix of the training data
%   C     : regularization constant (upper bound on each alpha_i)
%   alpha : n x 1 kernel expansion coefficients
%
% Solves the support vector data description dual
%   min_alpha  alpha' * K * alpha - sum_i alpha_i * K(i,i)
%   s.t.       sum_i alpha_i = 1,  0 <= alpha_i <= C
% by mapping it onto the QP form of pr_loqo2 (see above):
%   c = -diag(K), H = 2*K, A = ones(1,n), b = 1, l = 0, u = C.
n = size(K, 1);
alpha = pr_loqo2(-diag(K), 2 * K, ones(1, n), 1, zeros(n, 1), C * ones(n, 1));
113
114 % 4. Compute the outlier scores given
115 % KR: kernel matrix on training data
116 % KS: kernel matrix on test data
117 % KSR: kernel matrix on test data / training data
118 % alpha: learned kernel coefficients
function scores = compute_scores(KS, KSR, KR, alpha)
% COMPUTE_SCORES returns SVDD anomaly scores for the test points.
%   KS    : m x m kernel matrix on test data
%   KSR   : m x n kernel matrix test vs. training data
%   KR    : n x n kernel matrix on training data
%   alpha : n x 1 learned kernel coefficients
%   scores: m x 1 scores, normalized so that a score > 1 means the point
%           lies outside the learned hypersphere (cf. `as > 1` in sheet07).
%
% Squared distance to the center c = sum_i alpha_i * phi(x_i):
%   ||phi(x) - c||^2 = k(x,x) - 2 * sum_i alpha_i k(x,x_i) + alpha'*K*alpha
aKa = alpha' * KR * alpha;
d2_test = diag(KS) - 2 * (KSR * alpha) + aKa;
% Radius^2 is the training distance of boundary support vectors
% (0 < alpha_i < C). C is not passed in, so unbounded SVs are detected
% relative to max(alpha) -- NOTE(review): confirm this tolerance choice.
tol = 1e-6 * max(alpha);
d2_train = diag(KR) - 2 * (KR * alpha) + aKa;
sv = (alpha > tol) & (alpha < max(alpha) - tol);
if ~any(sv)
  sv = (alpha > tol); % fall back to all support vectors
end
R2 = median(d2_train(sv));
scores = d2_test / R2;
Attached Files
To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily. You are not allowed to attach a file to this page.