Skip to content

Commit 90e0232

Browse files
committed
fix linter
1 parent caed7d0 commit 90e0232

File tree

6 files changed

+126
-270
lines changed

6 files changed

+126
-270
lines changed

simulation-system/libs/csle-tolerance/tests/test_intrusion_recovery_pomdp_config.py

-1
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
)
44
from csle_tolerance.util.intrusion_recovery_pomdp_util import IntrusionRecoveryPomdpUtil
55
import pytest_mock
6-
import numpy as np
76

87

98
class TestIntrusionRecoveryPomdpConfigSuite:

simulation-system/libs/csle-tolerance/tests/test_intrusion_recovery_pomdp_util.py

+43-141
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,7 @@ def test__state_space(self) -> None:
2020
2121
:return: None
2222
"""
23-
assert (
24-
isinstance(item, int) for item in IntrusionRecoveryPomdpUtil.state_space()
25-
)
23+
assert (isinstance(item, int) for item in IntrusionRecoveryPomdpUtil.state_space())
2624
assert IntrusionRecoveryPomdpUtil.state_space() is not None
2725
assert IntrusionRecoveryPomdpUtil.state_space() == [0, 1, 2]
2826

@@ -40,9 +38,7 @@ def test_action_space(self) -> None:
4038
4139
:return: None
4240
"""
43-
assert (
44-
isinstance(item, int) for item in IntrusionRecoveryPomdpUtil.action_space()
45-
)
41+
assert (isinstance(item, int) for item in IntrusionRecoveryPomdpUtil.action_space())
4642
assert IntrusionRecoveryPomdpUtil.action_space() is not None
4743
assert IntrusionRecoveryPomdpUtil.action_space() == [0, 1]
4844

@@ -79,10 +75,7 @@ def test_cost_tensor(self) -> None:
7975
actions = [0]
8076
negate = False
8177
expected = [[0, 0.5]]
82-
assert (
83-
IntrusionRecoveryPomdpUtil.cost_tensor(eta, states, actions, negate)
84-
== expected
85-
)
78+
assert IntrusionRecoveryPomdpUtil.cost_tensor(eta, states, actions, negate) == expected
8679

8780
def test_observation_function(self) -> None:
8881
"""
@@ -93,9 +86,7 @@ def test_observation_function(self) -> None:
9386
s = 1
9487
o = 1
9588
num_observations = 2
96-
assert round(
97-
IntrusionRecoveryPomdpUtil.observation_function(s, o, num_observations), 1
98-
)
89+
assert round(IntrusionRecoveryPomdpUtil.observation_function(s, o, num_observations), 1)
9990

10091
def test_observation_tensor(self) -> None:
10192
"""
@@ -126,15 +117,7 @@ def test_transition_function(self) -> None:
126117
p_c_1 = 0.1
127118
p_c_2 = 0.2
128119
p_u = 0.5
129-
assert (
130-
round(
131-
IntrusionRecoveryPomdpUtil.transition_function(
132-
s, s_prime, a, p_a, p_c_1, p_c_2, p_u
133-
),
134-
1,
135-
)
136-
== 0.2
137-
)
120+
assert (round(IntrusionRecoveryPomdpUtil.transition_function(s, s_prime, a, p_a, p_c_1, p_c_2, p_u), 1) == 0.2)
138121

139122
def test_transition_function_game(self) -> None:
140123
"""
@@ -148,15 +131,7 @@ def test_transition_function_game(self) -> None:
148131
a2 = 1
149132
p_a = 0.2
150133
p_c_1 = 0.1
151-
assert (
152-
round(
153-
IntrusionRecoveryPomdpUtil.transition_function_game(
154-
s, s_prime, a1, a2, p_a, p_c_1
155-
),
156-
2,
157-
)
158-
== 0.18
159-
)
134+
assert (round(IntrusionRecoveryPomdpUtil.transition_function_game(s, s_prime, a1, a2, p_a, p_c_1), 2) == 0.18)
160135

161136
def test_transition_tensor(self) -> None:
162137
"""
@@ -171,19 +146,15 @@ def test_transition_tensor(self) -> None:
171146
p_c_2 = 0.2
172147
p_u = 0.5
173148
expected = [[[0.7, 0.2, 0.1], [0.4, 0.4, 0.2], [0, 0, 1.0]]]
174-
transition_tensor = IntrusionRecoveryPomdpUtil.transition_tensor(
175-
states, actions, p_a, p_c_1, p_c_2, p_u
176-
)
149+
transition_tensor = IntrusionRecoveryPomdpUtil.transition_tensor(states, actions, p_a, p_c_1, p_c_2, p_u)
177150
for i in range(len(transition_tensor)):
178151
for j in range(len(transition_tensor[i])):
179152
for k in range(len(transition_tensor[i][j])):
180153
transition_tensor[i][j][k] = round(transition_tensor[i][j][k], 1)
181154
assert transition_tensor == expected
182155
states = [0, 1]
183156
with pytest.raises(AssertionError):
184-
transition_tensor = IntrusionRecoveryPomdpUtil.transition_tensor(
185-
states, actions, p_a, p_c_1, p_c_2, p_u
186-
)
157+
IntrusionRecoveryPomdpUtil.transition_tensor(states, actions, p_a, p_c_1, p_c_2, p_u)
187158

188159
def test_transition_tensor_game(self) -> None:
189160
"""
@@ -196,14 +167,12 @@ def test_transition_tensor_game(self) -> None:
196167
attacker_actions = [0, 1]
197168
p_a = 0.5
198169
p_c_1 = 0.3
199-
result = IntrusionRecoveryPomdpUtil.transition_tensor_game(
200-
states, defender_actions, attacker_actions, p_a, p_c_1
201-
)
170+
result = IntrusionRecoveryPomdpUtil.transition_tensor_game(states, defender_actions, attacker_actions, p_a,
171+
p_c_1)
202172
assert len(result) == len(defender_actions)
203173
assert all(len(a1) == len(attacker_actions) for a1 in result)
204174
assert all(len(a2) == len(states) for a1 in result for a2 in a1)
205175
assert all(len(s) == len(states) for a1 in result for a2 in a1 for s in a2)
206-
207176
assert result[0][1][0][0] == (1 - p_a) * (1 - p_c_1)
208177
assert result[1][0][1][1] == 0
209178
assert result[1][1][2][2] == 1.0
@@ -234,12 +203,8 @@ def test_sampe_next_observation(self) -> None:
234203
observation_tensor = [[0.8, 0.2], [0.4, 0.6]]
235204
s_prime = 1
236205
observations = [0, 1]
237-
assert isinstance(
238-
IntrusionRecoveryPomdpUtil.sample_next_observation(
239-
observation_tensor, s_prime, observations
240-
),
241-
int,
242-
)
206+
assert isinstance(IntrusionRecoveryPomdpUtil.sample_next_observation(observation_tensor, s_prime, observations),
207+
int)
243208

244209
def test_bayes_filter(self) -> None:
245210
"""
@@ -256,22 +221,9 @@ def test_bayes_filter(self) -> None:
256221
observation_tensor = [[0.8, 0.2], [0.4, 0.6]]
257222
transition_tensor = [[[0.6, 0.4], [0.1, 0.9]]]
258223
b_prime_s_prime = 0.7
259-
assert (
260-
round(
261-
IntrusionRecoveryPomdpUtil.bayes_filter(
262-
s_prime,
263-
o,
264-
a,
265-
b,
266-
states,
267-
observations,
268-
observation_tensor,
269-
transition_tensor,
270-
),
271-
1,
272-
)
273-
== b_prime_s_prime
274-
)
224+
assert (round(IntrusionRecoveryPomdpUtil.bayes_filter(s_prime, o, a, b, states, observations,
225+
observation_tensor, transition_tensor), 1)
226+
== b_prime_s_prime)
275227

276228
def test_p_o_given_b_a1_a2(self) -> None:
277229
"""
@@ -286,15 +238,8 @@ def test_p_o_given_b_a1_a2(self) -> None:
286238
observation_tensor = [[0.8, 0.2], [0.4, 0.6]]
287239
transition_tensor = [[[0.6, 0.4], [0.1, 0.9]]]
288240
expected = 0.5
289-
assert (
290-
round(
291-
IntrusionRecoveryPomdpUtil.p_o_given_b_a1_a2(
292-
o, b, a, states, transition_tensor, observation_tensor
293-
),
294-
1,
295-
)
296-
== expected
297-
)
241+
assert (round(IntrusionRecoveryPomdpUtil.p_o_given_b_a1_a2(o, b, a, states, transition_tensor,
242+
observation_tensor), 1) == expected)
298243

299244
def test_next_belief(self) -> None:
300245
"""
@@ -309,23 +254,8 @@ def test_next_belief(self) -> None:
309254
observations = [0, 1]
310255
observation_tensor = [[0.8, 0.2], [0.4, 0.6]]
311256
transition_tensor = [[[0.3, 0.7], [0.6, 0.4]]]
312-
assert (
313-
round(
314-
sum(
315-
IntrusionRecoveryPomdpUtil.next_belief(
316-
o,
317-
a,
318-
b,
319-
states,
320-
observations,
321-
observation_tensor,
322-
transition_tensor,
323-
)
324-
),
325-
1,
326-
)
327-
== 1
328-
)
257+
assert (round(sum(IntrusionRecoveryPomdpUtil.next_belief(o, a, b, states, observations, observation_tensor,
258+
transition_tensor)), 1) == 1)
329259

330260
def test_pomdp_solver_file(self) -> None:
331261
"""
@@ -334,33 +264,14 @@ def test_pomdp_solver_file(self) -> None:
334264
:return: None
335265
"""
336266

337-
assert (
338-
IntrusionRecoveryPomdpUtil.pomdp_solver_file(
339-
IntrusionRecoveryPomdpConfig(
340-
eta=0.1,
341-
p_a=0.2,
342-
p_c_1=0.2,
343-
p_c_2=0.3,
344-
p_u=0.3,
345-
BTR=1,
346-
negate_costs=True,
347-
seed=1,
348-
discount_factor=0.5,
349-
states=[0, 1],
350-
actions=[0],
351-
observations=[0, 1],
352-
cost_tensor=[[0.1, 0.5], [0.5, 0.6]],
353-
observation_tensor=[[0.8, 0.2], [0.4, 0.6]],
354-
transition_tensor=[[[0.8, 0.2], [0.6, 0.4]]],
355-
b1=[0.3, 0.7],
356-
T=3,
357-
simulation_env_name="env",
358-
gym_env_name="gym",
359-
max_horizon=np.inf,
360-
)
361-
)
362-
is not None
363-
)
267+
assert (IntrusionRecoveryPomdpUtil.pomdp_solver_file(
268+
IntrusionRecoveryPomdpConfig(eta=0.1, p_a=0.2, p_c_1=0.2, p_c_2=0.3, p_u=0.3, BTR=1, negate_costs=True,
269+
seed=1, discount_factor=0.5, states=[0, 1], actions=[0], observations=[0, 1],
270+
cost_tensor=[[0.1, 0.5], [0.5, 0.6]],
271+
observation_tensor=[[0.8, 0.2], [0.4, 0.6]],
272+
transition_tensor=[[[0.8, 0.2], [0.6, 0.4]]], b1=[0.3, 0.7], T=3,
273+
simulation_env_name="env", gym_env_name="gym", max_horizon=np.inf))
274+
is not None)
364275

365276
def test_sample_next_state_game(self) -> None:
366277
"""
@@ -444,9 +355,7 @@ def test_generate_transitions(self) -> None:
444355
gym_env_name="gym_env",
445356
max_horizon=1000,
446357
)
447-
assert (
448-
IntrusionRecoveryPomdpUtil.generate_transitions(dto)[0] == "0 0 0 0 0 0.06"
449-
)
358+
assert IntrusionRecoveryPomdpUtil.generate_transitions(dto)[0] == "0 0 0 0 0 0.06"
450359

451360
def test_generate_rewards(self) -> None:
452361
"""
@@ -502,7 +411,11 @@ def test_generate_rewards(self) -> None:
502411
assert IntrusionRecoveryPomdpUtil.generate_rewards(dto)[0] == "0 0 0 -1"
503412

504413
def test_generate_os_posg_game_file(self) -> None:
505-
""" """
414+
"""
415+
Tests the generate_os_posg_game function
416+
417+
:return: None
418+
"""
506419

507420
states = [0, 1, 2]
508421
actions = [0, 1]
@@ -580,24 +493,13 @@ def test_generate_os_posg_game_file(self) -> None:
580493

581494
output_lines = game_file_str.split("\n")
582495

583-
assert (
584-
output_lines[0] == expected_game_description
585-
), f"Game description mismatch: {output_lines[0]}"
586-
assert (
587-
output_lines[1:4] == expected_state_descriptions
588-
), f"State descriptions mismatch: {output_lines[1:4]}"
589-
assert (
590-
output_lines[4:6] == expected_player_1_actions
591-
), f"Player 1 actions mismatch: {output_lines[4:6]}"
592-
assert (
593-
output_lines[6:8] == expected_player_2_actions
594-
), f"Player 2 actions mismatch: {output_lines[6:8]}"
595-
assert (
596-
output_lines[8:10] == expected_obs_descriptions
597-
), f"Observation descriptions mismatch: {output_lines[8:10]}"
598-
assert (
599-
output_lines[10:13] == expected_player_2_legal_actions
600-
), f"Player 2 legal actions mismatch: {output_lines[10:13]}"
601-
assert (
602-
output_lines[13:14] == expected_player_1_legal_actions
603-
), f"Player 1 legal actions mismatch: {output_lines[13:14]}"
496+
assert (output_lines[0] == expected_game_description), f"Game description mismatch: {output_lines[0]}"
497+
assert (output_lines[1:4] == expected_state_descriptions), f"State descriptions mismatch: {output_lines[1:4]}"
498+
assert (output_lines[4:6] == expected_player_1_actions), f"Player 1 actions mismatch: {output_lines[4:6]}"
499+
assert (output_lines[6:8] == expected_player_2_actions), f"Player 2 actions mismatch: {output_lines[6:8]}"
500+
assert (output_lines[8:10] == expected_obs_descriptions), \
501+
f"Observation descriptions mismatch: {output_lines[8:10]}"
502+
assert (output_lines[10:13] == expected_player_2_legal_actions), \
503+
f"Player 2 legal actions mismatch: {output_lines[10:13]}"
504+
assert (output_lines[13:14] == expected_player_1_legal_actions), \
505+
f"Player 1 legal actions mismatch: {output_lines[13:14]}"

simulation-system/libs/gym-csle-stopping-game/src/gym_csle_stopping_game/envs/stopping_game_env.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def step(self, action_profile: Tuple[int, Tuple[npt.NDArray[Any], int]]) \
7272
a1, a2_profile = action_profile
7373
pi2, a2 = a2_profile
7474
assert pi2.shape[0] == len(self.config.S)
75-
assert pi2.shape[1] == len(self.config.A1)
75+
assert pi2.shape[1] == len(self.config.A2)
7676
done = False
7777
info: Dict[str, Any] = {}
7878

@@ -83,8 +83,7 @@ def step(self, action_profile: Tuple[int, Tuple[npt.NDArray[Any], int]]) \
8383
else:
8484
# Compute r, s', b',o'
8585
r = self.config.R[self.state.l - 1][a1][a2][self.state.s]
86-
self.state.s = StoppingGameUtil.sample_next_state(l=self.state.l, a1=a1, a2=a2,
87-
T=self.config.T,
86+
self.state.s = StoppingGameUtil.sample_next_state(l=self.state.l, a1=a1, a2=a2, T=self.config.T,
8887
S=self.config.S, s=self.state.s)
8988
o = StoppingGameUtil.sample_next_observation(Z=self.config.Z,
9089
O=self.config.O, s_prime=self.state.s)

0 commit comments

Comments (0)