Skip to content

Commit 3ae714f

Browse files
committed
update contributors
1 parent fa2ece0 commit 3ae714f

File tree

5 files changed

+20
-7
lines changed

5 files changed

+20
-7
lines changed

README.md

+1
Original file line numberDiff line numberDiff line change
@@ -168,6 +168,7 @@ Thanks go to these people!
168168
</tr>
169169
<tr>
170170
<td align="center" valign="top" width="14.28%"><a href="https://github.com/kingxiaofire"><img src="https://github.com/kingxiaofire.png" width="100px;" alt="Yan Wang"/><br /><sub><b>Yan Wang</b></sub></a></td>
171+
<td align="center" valign="top" width="14.28%"><a href="https://github.com/Awsnaser"><img src="https://github.com/Awsnaser.png" width="100px;" alt="Aws Jaber"/><br /><sub><b>Aws Jaber</b></sub></a></td>
171172
</tr>
172173
</tbody>
173174
</table>

docs/_docs/contributing.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -36,4 +36,5 @@ should be on the list but is not):
3636
- Nils Forsgren, software development.
3737
- Bength Roland Pappila, software development.
3838
- Yu Hu, software development.
39-
- Yan Wang, software development.
39+
- Yan Wang, software development.
40+
- Aws Jaber, software development.

examples/eval/cyborg_scenario_two/eval_on_base_env.py

+12-5
Original file line numberDiff line numberDiff line change
@@ -13,22 +13,29 @@
1313
maximum_steps=100, red_agent_distribution=[1.0], reduced_action_space=True, decoy_state=True,
1414
scanned_state=True, decoy_optimization=False, cache_visited_states=False)
1515
csle_cyborg_env = CyborgScenarioTwoDefender(config=config)
16-
num_evaluations = 10000
17-
max_horizon = 100
16+
num_evaluations = 1
17+
max_horizon = 20
1818
returns = []
1919
seed = 215125
2020
random.seed(seed)
2121
np.random.seed(seed)
2222
torch.manual_seed(seed)
23-
print("Starting policy evaluation")
23+
# print(csle_cyborg_env.action_id_to_type_and_host)
24+
# import sys
25+
# sys.exit(0)
26+
# print("Starting policy evaluation")
2427
for i in range(num_evaluations):
2528
o, _ = csle_cyborg_env.reset()
2629
R = 0
2730
t = 0
2831
while t < max_horizon:
29-
a = ppo_policy.action(o=o)
32+
# a = ppo_policy.action(o=o)
33+
a = 4
3034
o, r, done, _, info = csle_cyborg_env.step(a)
35+
table = csle_cyborg_env.get_true_table()
36+
print(table)
37+
print(r)
3138
R += r
3239
t += 1
3340
returns.append(R)
34-
print(f"{i}/{num_evaluations}, avg R: {np.mean(returns)}, R: {R}")
41+
# print(f"{i}/{num_evaluations}, avg R: {np.mean(returns)}, R: {R}")

examples/eval/cyborg_scenario_two/evaluate_on_wrapper_env.py

+4-1
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,14 @@
33
import random
44
from csle_common.metastore.metastore_facade import MetastoreFacade
55
from gym_csle_cyborg.envs.cyborg_scenario_two_wrapper import CyborgScenarioTwoWrapper
6+
from gym_csle_cyborg.dao.red_agent_type import RedAgentType
67
from gym_csle_cyborg.dao.csle_cyborg_wrapper_config import CSLECyborgWrapperConfig
78

89
if __name__ == '__main__':
910
ppo_policy = MetastoreFacade.get_ppo_policy(id=58)
1011
config = CSLECyborgWrapperConfig(maximum_steps=100, gym_env_name="",
11-
save_trace=False, reward_shaping=False, scenario=2)
12+
save_trace=False, reward_shaping=False, scenario=2,
13+
red_agent_type=RedAgentType.B_LINE_AGENT)
1214
env = CyborgScenarioTwoWrapper(config=config)
1315
num_evaluations = 10000
1416
max_horizon = 100
@@ -25,6 +27,7 @@
2527
while t < max_horizon:
2628
a = ppo_policy.action(o=o)
2729
o, r, done, _, info = env.step(a)
30+
env.show
2831
R += r
2932
t += 1
3033
returns.append(R)

simulation-system/libs/csle-common/tests/test_snort_ids_controller.py

+1
Original file line numberDiff line numberDiff line change
@@ -418,6 +418,7 @@ def test_get_snort_managers_info(
418418
:param mock_get_ports:mock_get_ports
419419
:param mock_get_ips: mock_get_ips
420420
421+
421422
:return: None
422423
"""
423424
mock_get_ips.return_value = ["10.0.0.1", "10.0.0.2"]

0 commit comments

Comments (0)