Commit 7491176

Update type annotations
1 parent 439f8a0 commit 7491176

1 file changed: +22 -22 lines changed


simulation-system/libs/gym-csle-stopping-game/src/gym_csle_stopping_game/util/stopping_game_util.py

+22 -22
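Background on the change: np.float_ was a NumPy 1.x alias for np.float64 and was removed in NumPy 2.0, so annotations written as npt.NDArray[np.float_] fail to import under NumPy >= 2.0, while npt.NDArray[np.float64] works on both major versions. A minimal sketch of the annotation style the diff moves to (illustrative function, not from this repo; assumes NumPy >= 1.20 for numpy.typing):

import numpy as np
import numpy.typing as npt

def normalize_belief(b: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    # Rescale a nonnegative vector so its entries sum to one (a valid belief).
    return b / b.sum()

print(normalize_belief(np.array([0.2, 0.3, 0.5])))  # [0.2 0.3 0.5]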
@@ -390,7 +390,7 @@ def pomdp_solver_file(config: StoppingGameConfig, discount_factor: float, pi2: n
         return file_str

     @staticmethod
-    def reduce_T_attacker(T: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArray[np.float_]:
+    def reduce_T_attacker(T: npt.NDArray[np.float64], strategy: Policy) -> npt.NDArray[np.float64]:
         """
         Reduces the transition tensor based on a given attacker strategy
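This hunk and the four that follow retype the reduce_* family, which all marginalize one player's action out of a tensor under that player's strategy. An illustrative sketch of the reduction (not the repository's code; assumes T is indexed T[a1][a2][s][s_prime] and that the strategy is given as per-state action probabilities, whereas the real methods take a Policy object):

import numpy as np
import numpy.typing as npt

def reduce_T_attacker_sketch(T: npt.NDArray[np.float64],
                             pi2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    # T has shape (|A1|, |A2|, |S|, |S|); pi2 has shape (|S|, |A2|) with
    # pi2[s][a2] = probability that the attacker plays a2 in state s.
    # Averaging over a2 yields a defender-only tensor of shape (|A1|, |S|, |S|).
    num_a1, num_a2, num_s, _ = T.shape
    reduced_T = np.zeros((num_a1, num_s, num_s))
    for a1 in range(num_a1):
        for s in range(num_s):
            for s_prime in range(num_s):
                reduced_T[a1][s][s_prime] = sum(
                    pi2[s][a2] * T[a1][a2][s][s_prime] for a2 in range(num_a2))
    return reduced_T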
@@ -415,7 +415,7 @@ def reduce_T_attacker(T: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArra
         return reduced_T

     @staticmethod
-    def reduce_R_attacker(R: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArray[np.float_]:
+    def reduce_R_attacker(R: npt.NDArray[np.float64], strategy: Policy) -> npt.NDArray[np.float64]:
         """
         Reduces the reward tensor based on a given attacker strategy
@@ -433,7 +433,7 @@ def reduce_R_attacker(R: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArra
         return reduced_R

     @staticmethod
-    def reduce_Z_attacker(Z: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArray[np.float_]:
+    def reduce_Z_attacker(Z: npt.NDArray[np.float64], strategy: Policy) -> npt.NDArray[np.float64]:
         """
         Reduces the observation tensor based on a given attacker strategy
@@ -450,7 +450,7 @@ def reduce_Z_attacker(Z: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArra
         return reduced_Z

     @staticmethod
-    def reduce_T_defender(T: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArray[np.float_]:
+    def reduce_T_defender(T: npt.NDArray[np.float64], strategy: Policy) -> npt.NDArray[np.float64]:
         """
         Reduces the transition tensor based on a given defender strategy
@@ -469,7 +469,7 @@ def reduce_T_defender(T: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArra
         return reduced_T

     @staticmethod
-    def reduce_R_defender(R: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArray[np.float_]:
+    def reduce_R_defender(R: npt.NDArray[np.float64], strategy: Policy) -> npt.NDArray[np.float64]:
         """
         Reduces the reward tensor based on a given defender strategy
@@ -487,10 +487,10 @@ def reduce_R_defender(R: npt.NDArray[np.float_], strategy: Policy) -> npt.NDArra
         return reduced_R

     @staticmethod
-    def aggregate_belief_mdp_defender(aggregation_resolution: int, T: npt.NDArray[np.float_],
-                                      R: npt.NDArray[np.float_], Z: npt.NDArray[np.float_],
+    def aggregate_belief_mdp_defender(aggregation_resolution: int, T: npt.NDArray[np.float64],
+                                      R: npt.NDArray[np.float64], Z: npt.NDArray[np.float64],
                                       S: npt.NDArray[np.int_], A: npt.NDArray[np.int_], O: npt.NDArray[np.int_]) \
-            -> Tuple[npt.NDArray[np.float_], npt.NDArray[np.int_], npt.NDArray[np.float_], npt.NDArray[np.float_]]:
+            -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.int_], npt.NDArray[np.float64], npt.NDArray[np.float64]]:
         """
         Generates an aggregate belief MDP from a given POMDP specification and aggregation resolution
@@ -512,7 +512,7 @@ def aggregate_belief_mdp_defender(aggregation_resolution: int, T: npt.NDArray[np
         return aggregate_belief_space, A, belief_T, belief_R

     @staticmethod
-    def generate_aggregate_belief_space(n: int, belief_space_dimension: int) -> npt.NDArray[np.float_]:
+    def generate_aggregate_belief_space(n: int, belief_space_dimension: int) -> npt.NDArray[np.float64]:
         """
         Generate an aggregate belief space B_n.
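For generate_aggregate_belief_space, the usual construction of B_n is the regular grid on the probability simplex whose components are integer multiples of 1/n. A self-contained sketch of that construction (illustrative, not the repository's implementation):

import itertools
import numpy as np
import numpy.typing as npt

def aggregate_belief_space_sketch(n: int, dim: int) -> npt.NDArray[np.float64]:
    # Enumerate all nonnegative integer vectors of length dim summing to n,
    # then scale by 1/n: each row is a belief on the (dim-1)-simplex.
    combos = [c for c in itertools.product(range(n + 1), repeat=dim) if sum(c) == n]
    return np.array(combos, dtype=np.float64) / n

print(aggregate_belief_space_sketch(n=2, dim=3))
# 6 beliefs: [0,0,1], [0,.5,.5], [0,1,0], [.5,0,.5], [.5,.5,0], [1,0,0]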
@@ -534,8 +534,8 @@ def generate_aggregate_belief_space(n: int, belief_space_dimension: int) -> npt.

     @staticmethod
     def generate_aggregate_belief_reward_tensor(
-            aggregate_belief_space: npt.NDArray[np.float_], S: npt.NDArray[np.int_], A: npt.NDArray[np.int_],
-            R: npt.NDArray[np.float_]) -> npt.NDArray[np.float_]:
+            aggregate_belief_space: npt.NDArray[np.float64], S: npt.NDArray[np.int_], A: npt.NDArray[np.int_],
+            R: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
         """
         Generates an aggregate reward tensor for the aggregate belief MDP
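The aggregate reward tensor has a simple closed form: the reward of taking action a in grid belief b is the expected state reward under b. A one-function sketch (assumes R is indexed R[a][s]; the repository's indexing may differ):

import numpy as np
import numpy.typing as npt

def belief_reward_tensor_sketch(belief_space: npt.NDArray[np.float64],
                                R: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    # belief_space has shape (num_beliefs, |S|), R has shape (|A|, |S|).
    # Result[i][a] = sum_s belief_space[i][s] * R[a][s].
    return np.einsum("is,as->ia", belief_space, R)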
@@ -557,8 +557,8 @@ def generate_aggregate_belief_reward_tensor(

     @staticmethod
     def generate_aggregate_belief_transition_operator(
-            aggregate_belief_space: npt.NDArray[np.float_], S: npt.NDArray[np.int_], A: npt.NDArray[np.int_],
-            O: npt.NDArray[np.int_], T: npt.NDArray[np.float_], Z: npt.NDArray[np.float_]) -> npt.NDArray[np.float_]:
+            aggregate_belief_space: npt.NDArray[np.float64], S: npt.NDArray[np.int_], A: npt.NDArray[np.int_],
+            O: npt.NDArray[np.int_], T: npt.NDArray[np.float64], Z: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
         """
         Generates an aggregate belief space transition operator
@@ -581,11 +581,11 @@ def generate_aggregate_belief_transition_operator(
         return belief_T

     @staticmethod
-    def aggregate_belief_transition_probability(b1: npt.NDArray[np.float_], b2: npt.NDArray[np.float_], a: int,
+    def aggregate_belief_transition_probability(b1: npt.NDArray[np.float64], b2: npt.NDArray[np.float64], a: int,
                                                 S: npt.NDArray[np.int_], O: npt.NDArray[np.int_],
                                                 A: npt.NDArray[np.int_],
-                                                T: npt.NDArray[np.float_], Z: npt.NDArray[np.float_],
-                                                aggregate_belief_space: npt.NDArray[np.float_]) -> float:
+                                                T: npt.NDArray[np.float64], Z: npt.NDArray[np.float64],
+                                                aggregate_belief_space: npt.NDArray[np.float64]) -> float:
         """
         Calculates the probability of transitioning from belief b1 to belief b2 when taking action a
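aggregate_belief_transition_probability is the core of the belief-MDP construction: the probability of moving from grid belief b1 to grid belief b2 under action a is the total probability of observations whose Bayesian update of b1 snaps, by nearest neighbor, to b2. A sketch under assumed indexing T[a][s][s_prime] and Z[a][s_prime][o], with tensors already reduced over the opponent (the real signature also takes S, O, and A):

import numpy as np
import numpy.typing as npt

def belief_transition_prob_sketch(b1: npt.NDArray[np.float64], b2: npt.NDArray[np.float64],
                                  a: int, T: npt.NDArray[np.float64],
                                  Z: npt.NDArray[np.float64],
                                  belief_space: npt.NDArray[np.float64]) -> float:
    prob = 0.0
    for o in range(Z.shape[-1]):
        p_o = float((b1 @ T[a]) @ Z[a][:, o])        # P(o | b1, a)
        if p_o == 0.0:
            continue                                  # observation impossible under (b1, a)
        b_next = (b1 @ T[a]) * Z[a][:, o] / p_o       # Bayesian belief update
        distances = np.linalg.norm(belief_space - b_next, axis=1)
        nearest = belief_space[int(np.argmin(distances))]
        if np.allclose(nearest, b2):                  # update snaps to b2 on the grid
            prob += p_o
    return prob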
@@ -616,8 +616,8 @@ def aggregate_belief_transition_probability(b1: npt.NDArray[np.float_], b2: npt.

     @staticmethod
     def pomdp_next_belief(o: int, a: int, b: npt.NDArray[np.float64], states: npt.NDArray[np.int_],
-                          observations: npt.NDArray[np.int_], observation_tensor: npt.NDArray[np.float_],
-                          transition_tensor: npt.NDArray[np.float_]) \
+                          observations: npt.NDArray[np.int_], observation_tensor: npt.NDArray[np.float64],
+                          transition_tensor: npt.NDArray[np.float64]) \
             -> npt.NDArray[np.float64]:
         """
         Computes the next belief of the POMDP using a Bayesian filter
@@ -643,8 +643,8 @@ def pomdp_next_belief(o: int, a: int, b: npt.NDArray[np.float64], states: npt.ND

     @staticmethod
     def pomdp_bayes_filter(s_prime: int, o: int, a: int, b: npt.NDArray[np.float64], states: npt.NDArray[np.int_],
-                           observations: npt.NDArray[np.int_], observation_tensor: npt.NDArray[np.float_],
-                           transition_tensor: npt.NDArray[np.float_]) -> float:
+                           observations: npt.NDArray[np.int_], observation_tensor: npt.NDArray[np.float64],
+                           transition_tensor: npt.NDArray[np.float64]) -> float:
         """
         A Bayesian filter to compute b[s_prime] of the POMDP
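pomdp_next_belief and pomdp_bayes_filter in the two hunks above implement the standard POMDP belief update; the filter computes a single component of the next belief. As a worked sketch of the formula the docstrings describe, assuming T[a][s][s_prime] and Z[a][s_prime][o] indexing (an assumption; the repo's tensors may be indexed differently):

import numpy as np
import numpy.typing as npt

def bayes_filter_sketch(s_prime: int, o: int, a: int, b: npt.NDArray[np.float64],
                        Z: npt.NDArray[np.float64],
                        T: npt.NDArray[np.float64]) -> float:
    # b'(s') = Z[a][s'][o] * sum_s T[a][s][s'] * b[s], normalized by
    # P(o | b, a) = sum_{s'} Z[a][s'][o] * sum_s T[a][s][s'] * b[s].
    n = len(b)
    num = Z[a][s_prime][o] * sum(T[a][s][s_prime] * b[s] for s in range(n))
    denom = sum(Z[a][sp][o] * T[a][s][sp] * b[s]
                for sp in range(n) for s in range(n))
    return num / denom if denom > 0 else 0.0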
@@ -679,8 +679,8 @@ def pomdp_bayes_filter(s_prime: int, o: int, a: int, b: npt.NDArray[np.float64],
         return b_prime_s_prime

     @staticmethod
-    def find_nearest_neighbor_belief(belief_space: npt.NDArray[np.float_], target_belief: npt.NDArray[np.float_]) \
-            -> npt.NDArray[np.float_]:
+    def find_nearest_neighbor_belief(belief_space: npt.NDArray[np.float64], target_belief: npt.NDArray[np.float64]) \
+            -> npt.NDArray[np.float64]:
         """
         Finds the nearest neighbor (in the Euclidean sense) of a given belief in a certain belief space
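find_nearest_neighbor_belief is the projection step that keeps aggregated belief updates on the grid. A minimal sketch with a usage example (illustrative, not the repository's code):

import numpy as np
import numpy.typing as npt

def nearest_neighbor_belief_sketch(belief_space: npt.NDArray[np.float64],
                                   target: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    # Return the row of belief_space with the smallest Euclidean distance to target.
    distances = np.linalg.norm(belief_space - target, axis=1)
    return belief_space[int(np.argmin(distances))]

grid = np.array([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])
print(nearest_neighbor_belief_sketch(grid, np.array([0.4, 0.6])))  # [0.5 0.5]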
