bddl_utils

BDDLEntity

Bases: Wrapper

Thin wrapper class that wraps an object or system if it exists, or nothing if it does not exist. Will dynamically reference the object / system once it becomes real in the sim

Source code in omnigibson/utils/bddl_utils.py
class BDDLEntity(Wrapper):
    """
    Thin wrapper class that wraps an object or system if it exists, or nothing if it does not exist. Will
    dynamically reference the object / system once it becomes real in the sim
    """

    def __init__(
        self,
        bddl_inst,
        entity=None,
    ):
        """
        Args:
            bddl_inst (str): BDDL synset instance of the entity, e.g.: "almond.n.01_1"
            entity (None or DatasetObject or BaseSystem): If specified, the BDDL entity to wrap. If not
                specified, will initially wrap nothing, but may dynamically reference an actual object or system
                if it exists in the future
        """
        # Store synset and other info, and pass entity internally
        self.bddl_inst = bddl_inst
        self.synset = "_".join(self.bddl_inst.split("_")[:-1])
        self.is_system = is_substance_synset(self.synset)

        # Infer the correct category to assign
        self.og_categories = (
            OBJECT_TAXONOMY.get_subtree_substances(self.synset)
            if self.is_system
            else OBJECT_TAXONOMY.get_subtree_categories(self.synset)
        )

        super().__init__(obj=entity)

    @property
    def name(self):
        """
        Returns:
            None or str: Name of this entity, if it exists, else None
        """
        if self.exists:
            return self.og_categories[0] if self.is_system else self.wrapped_obj.name
        else:
            return None

    @property
    def exists(self):
        """
        Checks whether the entity referenced by @synset exists

        Returns:
            bool: Whether the entity referenced by @synset exists
        """
        return self.wrapped_obj is not None

    def set_entity(self, entity):
        """
        Sets the internal entity, overriding any if it already exists

        Args:
            entity (BaseSystem or BaseObject): Entity to set internally
        """
        self.wrapped_obj = entity

    def clear_entity(self):
        """
        Clears the internal entity, if any
        """
        self.wrapped_obj = None

    def get_state(self, state, *args, **kwargs):
        """
        Helper function to grab wrapped entity's state @state

        Args:
            state (BaseObjectState): State whose get_value() should be called
            *args (tuple): Any arguments to pass to getter, in order
            **kwargs (dict): Any keyword arguments to pass to getter, in order

        Returns:
            any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None), else False
        """
        return self.wrapped_obj.states[state].get_value(*args, **kwargs) if self.exists else False

    def set_state(self, state, *args, **kwargs):
        """
        Helper function to set wrapped entity's state @state. Note: Should only be called if the entity exists!

        Args:
            state (BaseObjectState): State whose set_value() should be called
            *args (tuple): Any arguments to pass to the setter, in order
            **kwargs (dict): Any keyword arguments to pass to the setter, in order

        Returns:
            any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None)
        """
        assert self.exists, f"Cannot call set_state() for BDDLEntity {self.synset} when the entity does not exist!"
        return self.wrapped_obj.states[state].set_value(*args, **kwargs)
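
The snippet below is a minimal, illustrative sketch of the wrapper lifecycle. The name `apple_obj` is hypothetical and stands in for any `DatasetObject` already loaded in the scene.

```python
from omnigibson.utils.bddl_utils import BDDLEntity

# Start with an empty wrapper -- the referenced object does not exist yet
apple = BDDLEntity(bddl_inst="apple.n.01_1")
assert not apple.exists
assert apple.name is None

# Once the corresponding object becomes real in the sim, point the wrapper at it
apple.set_entity(apple_obj)  # apple_obj: an existing DatasetObject (hypothetical)
assert apple.exists

# Detach the wrapper again, e.g. when the object is removed
apple.clear_entity()
assert not apple.exists
```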

exists property

Checks whether the entity referenced by @synset exists

Returns:

| Type | Description |
| --- | --- |
| bool | Whether the entity referenced by @synset exists |

name property

Returns:

| Type | Description |
| --- | --- |
| None or str | Name of this entity, if it exists, else None |

__init__(bddl_inst, entity=None)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| bddl_inst | str | BDDL synset instance of the entity, e.g.: "almond.n.01_1" | required |
| entity | None or DatasetObject or BaseSystem | If specified, the BDDL entity to wrap. If not specified, will initially wrap nothing, but may dynamically reference an actual object or system if it exists in the future | None |

Source code in omnigibson/utils/bddl_utils.py
def __init__(
    self,
    bddl_inst,
    entity=None,
):
    """
    Args:
        bddl_inst (str): BDDL synset instance of the entity, e.g.: "almond.n.01_1"
        entity (None or DatasetObject or BaseSystem): If specified, the BDDL entity to wrap. If not
            specified, will initially wrap nothing, but may dynamically reference an actual object or system
            if it exists in the future
    """
    # Store synset and other info, and pass entity internally
    self.bddl_inst = bddl_inst
    self.synset = "_".join(self.bddl_inst.split("_")[:-1])
    self.is_system = is_substance_synset(self.synset)

    # Infer the correct category to assign
    self.og_categories = (
        OBJECT_TAXONOMY.get_subtree_substances(self.synset)
        if self.is_system
        else OBJECT_TAXONOMY.get_subtree_categories(self.synset)
    )

    super().__init__(obj=entity)
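
For orientation, these are the attributes the constructor derives from the instance name. The exact `og_categories` values depend on the installed BDDL object taxonomy, so the values in the comments are illustrative.

```python
entity = BDDLEntity(bddl_inst="almond.n.01_1")
print(entity.synset)         # "almond.n.01" -- the trailing "_1" instance suffix is stripped
print(entity.is_system)      # False -- almond.n.01 is not a substance synset
print(entity.og_categories)  # dataset categories mapped to the synset, e.g. ["almond"]
```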

clear_entity()

Clears the internal entity, if any

Source code in omnigibson/utils/bddl_utils.py
def clear_entity(self):
    """
    Clears the internal entity, if any
    """
    self.wrapped_obj = None

get_state(state, *args, **kwargs)

Helper function to grab wrapped entity's state @state

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| state | BaseObjectState | State whose get_value() should be called | required |
| *args | tuple | Any arguments to pass to getter, in order | () |
| **kwargs | dict | Any keyword arguments to pass to getter, in order | {} |

Returns:

| Type | Description |
| --- | --- |
| any | Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None), else False |

Source code in omnigibson/utils/bddl_utils.py
def get_state(self, state, *args, **kwargs):
    """
    Helper function to grab wrapped entity's state @state

    Args:
        state (BaseObjectState): State whose get_value() should be called
        *args (tuple): Any arguments to pass to getter, in order
        **kwargs (dict): Any keyword arguments to pass to getter, in order

    Returns:
        any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None), else False
    """
    return self.wrapped_obj.states[state].get_value(*args, **kwargs) if self.exists else False
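
Because of the `else False` fallback, `get_state()` can be called before the entity exists without raising. A short sketch, assuming `omnigibson.object_states` is importable and the wrapped object supports the queried state; `apple_obj` is again a hypothetical `DatasetObject`:

```python
from omnigibson import object_states

entity = BDDLEntity(bddl_inst="apple.n.01_1")  # wraps nothing yet
print(entity.get_state(object_states.Cooked))  # False -- entity does not exist

entity.set_entity(apple_obj)
print(entity.get_state(object_states.Cooked))  # delegates to apple_obj.states[Cooked].get_value()
```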

set_entity(entity)

Sets the internal entity, overriding any if it already exists

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| entity | BaseSystem or BaseObject | Entity to set internally | required |

Source code in omnigibson/utils/bddl_utils.py
def set_entity(self, entity):
    """
    Sets the internal entity, overriding any if it already exists

    Args:
        entity (BaseSystem or BaseObject): Entity to set internally
    """
    self.wrapped_obj = entity

set_state(state, *args, **kwargs)

Helper function to set wrapped entity's state @state. Note: Should only be called if the entity exists!

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| state | BaseObjectState | State whose set_value() should be called | required |
| *args | tuple | Any arguments to pass to the setter, in order | () |
| **kwargs | dict | Any keyword arguments to pass to the setter, in order | {} |

Returns:

| Type | Description |
| --- | --- |
| any | Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None) |

Source code in omnigibson/utils/bddl_utils.py
def set_state(self, state, *args, **kwargs):
    """
    Helper function to set wrapped entity's state @state. Note: Should only be called if the entity exists!

    Args:
        state (BaseObjectState): State whose set_value() should be called
        *args (tuple): Any arguments to pass to the setter, in order
        **kwargs (dict): Any keyword arguments to pass to the setter, in order

    Returns:
        any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None)
    """
    assert self.exists, f"Cannot call set_state() for BDDLEntity {self.synset} when the entity does not exist!"
    return self.wrapped_obj.states[state].set_value(*args, **kwargs)
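
In contrast to `get_state()`, `set_state()` asserts that the entity exists before delegating, so callers should guard on `exists`. A hedged sketch continuing the previous example with the same `object_states.Cooked` state:

```python
if entity.exists:
    # Delegates to wrapped_obj.states[Cooked].set_value(True)
    entity.set_state(object_states.Cooked, True)
else:
    # Calling set_state() here would trip the assertion
    pass
```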

BDDLSampler
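
BDDLSampler drives scene-level sampling for a BEHAVIOR activity: it first validates and prepares the scene (room assignments, attachments, sampling order, sampleable-object import), then samples the initial (and optionally goal) conditions. The sketch below only illustrates the calling convention visible in the source; in practice the constructor arguments are assembled by the BEHAVIOR task setup, so `env`, `activity_conditions`, `object_scope`, and `backend` are placeholders here.

```python
sampler = BDDLSampler(
    env=env,                                  # active OmniGibson Environment
    activity_conditions=activity_conditions,  # parsed BDDL activity conditions
    object_scope=object_scope,                # dict: BDDL instance name -> BDDLEntity
    backend=backend,                          # BDDL backend used to ground predicates
)

# Returns (success, feedback); feedback is None on success, else an error message
success, feedback = sampler.sample(validate_goal=True)
if not success:
    print(f"Sampling failed: {feedback}")
```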

Source code in omnigibson/utils/bddl_utils.py
class BDDLSampler:
    def __init__(self, env, activity_conditions, object_scope, backend):
        # Avoid circular imports here
        from omnigibson.scenes.traversable_scene import TraversableScene

        # Store internal variables from inputs
        self._env = env
        self._scene_model = self._env.scene.scene_model if isinstance(self._env.scene, TraversableScene) else None
        self._agent = self._env.robots[0]
        self._backend = backend
        self._activity_conditions = activity_conditions
        self._object_scope = object_scope
        self._object_instance_to_synset = {
            obj_inst: obj_cat
            for obj_cat in self._activity_conditions.parsed_objects
            for obj_inst in self._activity_conditions.parsed_objects[obj_cat]
        }
        self._substance_instances = {
            obj_inst
            for obj_inst in self._object_scope.keys()
            if is_substance_synset(self._object_instance_to_synset[obj_inst])
        }

        # Initialize other variables that will be filled in later
        self._room_type_to_object_instance = None  # dict
        self._inroom_object_instances = None  # set of str
        self._object_sampling_orders = None  # dict mapping str to list of str
        self._sampled_objects = None  # set of BaseObject
        self._future_obj_instances = None  # set of str
        self._inroom_object_conditions = None  # list of (condition, positive) tuple
        self._inroom_object_scope_filtered_initial = None  # dict mapping str to BDDLEntity
        self._attached_objects = defaultdict(set)  # dict mapping str to set of str

    def sample(self, validate_goal=False):
        """
        Run sampling for this BEHAVIOR task

        Args:
            validate_goal (bool): Whether the goal should be validated or not

        Returns:
            2-tuple:
                - bool: Whether sampling was successful or not
                - None or str: None if successful, otherwise the associated error message
        """
        log.info("Sampling task...")
        # Reject scenes with missing non-sampleable objects
        # Populate object_scope with sampleable objects and the robot
        accept_scene, feedback = self._prepare_scene_for_sampling()
        if not accept_scene:
            return accept_scene, feedback
        # Sample objects to satisfy initial conditions
        accept_scene, feedback = self._sample_all_conditions(validate_goal=validate_goal)
        if not accept_scene:
            return accept_scene, feedback

        log.info("Sampling succeeded!")

        return True, None

    def _sample_all_conditions(self, validate_goal=False):
        """
        Run sampling for this BEHAVIOR task

        Args:
            validate_goal (bool): Whether the goal should be validated or not

        Returns:
            2-tuple:
                - bool: Whether sampling was successful or not
                - None or str: None if successful, otherwise the associated error message
        """
        # Auto-initialize all sampleable objects
        with og.sim.playing():
            self._env.scene.reset()

            error_msg = self._sample_initial_conditions()
            if error_msg:
                log.error(error_msg)
                return False, error_msg

            if validate_goal:
                error_msg = self._sample_goal_conditions()
                if error_msg:
                    log.error(error_msg)
                    return False, error_msg

            error_msg = self._sample_initial_conditions_final()
            if error_msg:
                log.error(error_msg)
                return False, error_msg

            self._env.scene.update_initial_state()

        return True, None

    def _prepare_scene_for_sampling(self):
        """
        Runs sanity checks for the current scene for the given BEHAVIOR task

        Returns:
            2-tuple:
                - bool: Whether the generated scene activity should be accepted or not
                - dict: Any feedback from the sampling / initialization process
        """
        error_msg = self._parse_inroom_object_room_assignment()
        if error_msg:
            log.error(error_msg)
            return False, error_msg

        error_msg = self._parse_attached_states()
        if error_msg:
            log.error(error_msg)
            return False, error_msg

        error_msg = self._build_sampling_order()
        if error_msg:
            log.error(error_msg)
            return False, error_msg

        error_msg = self._build_inroom_object_scope()
        if error_msg:
            log.error(error_msg)
            return False, error_msg

        error_msg = self._import_sampleable_objects()
        if error_msg:
            log.error(error_msg)
            return False, error_msg

        self._object_scope["agent.n.01_1"] = BDDLEntity(bddl_inst="agent.n.01_1", entity=self._agent)

        return True, None

    def _parse_inroom_object_room_assignment(self):
        """
        Infers which rooms each object is assigned to
        """
        self._room_type_to_object_instance = dict()
        self._inroom_object_instances = set()
        for cond in self._activity_conditions.parsed_initial_conditions:
            if cond[0] == "inroom":
                obj_inst, room_type = cond[1], cond[2]
                obj_synset = self._object_instance_to_synset[obj_inst]
                abilities = OBJECT_TAXONOMY.get_abilities(obj_synset)
                if "sceneObject" not in abilities:
                    # Invalid room assignment
                    return (
                        f"You have assigned room type for [{obj_synset}], but [{obj_synset}] is sampleable. "
                        f"Only non-sampleable (scene) objects can have room assignment."
                    )
                if self._scene_model is not None and room_type not in self._env.scene.seg_map.room_sem_name_to_ins_name:
                    # Missing room type
                    return f"Room type [{room_type}] missing in scene [{self._scene_model}]."
                if room_type not in self._room_type_to_object_instance:
                    self._room_type_to_object_instance[room_type] = []
                self._room_type_to_object_instance[room_type].append(obj_inst)

                if obj_inst in self._inroom_object_instances:
                    # Duplicate room assignment
                    return f"Object [{obj_inst}] has more than one room assignment"

                self._inroom_object_instances.add(obj_inst)

    def _parse_attached_states(self):
        """
        Infers which objects are attached to which other objects.
        If a category-level attachment is specified, it will be expanded to all instances of that category.
        E.g. if the goal condition requires corks to be attached to bottles, every cork needs to be able to
        attach to every bottle.
        """
        for cond in self._activity_conditions.parsed_initial_conditions:
            if cond[0] == "attached":
                obj_inst, parent_inst = cond[1], cond[2]
                if obj_inst not in self._object_scope or parent_inst not in self._object_scope:
                    return f"Object [{obj_inst}] or parent [{parent_inst}] in attached initial condition not found in object scope"
                self._attached_objects[obj_inst].add(parent_inst)

        ground_attached_conditions = []
        conditions_to_check = self._activity_conditions.parsed_goal_conditions.copy()
        while conditions_to_check:
            new_conditions_to_check = []
            for cond in conditions_to_check:
                if cond[0] == "attached":
                    ground_attached_conditions.append(cond)
                else:
                    new_conditions_to_check.extend([ele for ele in cond if isinstance(ele, list)])
            conditions_to_check = new_conditions_to_check

        for cond in ground_attached_conditions:
            obj_inst, parent_inst = cond[1].lstrip("?"), cond[2].lstrip("?")
            if obj_inst in self._object_scope:
                obj_insts = [obj_inst]
            elif obj_inst in self._activity_conditions.parsed_objects:
                obj_insts = self._activity_conditions.parsed_objects[obj_inst]
            else:
                return f"Object [{obj_inst}] in attached goal condition not found in object scope or parsed objects"

            if parent_inst in self._object_scope:
                parent_insts = [parent_inst]
            elif parent_inst in self._activity_conditions.parsed_objects:
                parent_insts = self._activity_conditions.parsed_objects[parent_inst]
            else:
                return f"Parent [{parent_inst}] in attached goal condition not found in object scope or parsed objects"

            for obj_inst in obj_insts:
                for parent_inst in parent_insts:
                    self._attached_objects[obj_inst].add(parent_inst)

    def _build_sampling_order(self):
        """
        Sampling orders is a list of lists: [[batch_1_inst_1, ... batch_1_inst_N], [batch_2_inst_1, batch_2_inst_M], ...]
        Sampling should happen for batch 1 first, then batch 2, so on and so forth
        Example: OnTop(plate, table) should belong to batch 1, and OnTop(apple, plate) should belong to batch 2
        """
        unsampleable_conditions = []
        sampling_groups = {group: [] for group in ("kinematic", "particle", "unary")}
        self._object_sampling_conditions = {group: [] for group in ("kinematic", "particle", "unary")}
        self._object_sampling_orders = {group: [] for group in ("kinematic", "particle", "unary")}
        self._inroom_object_conditions = []

        # First, sort initial conditions into kinematic, particle and unary groups
        # bddl.condition_evaluation.HEAD, each with one child.
        # This child is either a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate or
        # a Negation of a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate
        for condition in get_initial_conditions(self._activity_conditions, self._backend, self._object_scope):
            condition, positive = process_single_condition(condition)
            if condition is None:
                continue

            # Sampled conditions must always be positive
            # Non-positive (e.g.: NOT onTop) is not restrictive enough for sampling
            if condition.STATE_NAME in KINEMATIC_STATES_BDDL and not positive:
                return "Initial condition has negative kinematic conditions: {}".format(condition.body)

            # Store any unsampleable conditions separately
            if isinstance(condition, UnsampleablePredicate):
                unsampleable_conditions.append(condition)
                continue

            # Infer the group the condition and its object instances belong to
            # (a) Kinematic (binary) conditions, where (ent0, ent1) are both objects
            # (b) Particle (binary) conditions, where (ent0, ent1) are (object, substance)
            # (c) Unary conditions, where (ent0,) is an object
            # Binary conditions have length 2: (ent0, ent1)
            if len(condition.body) == 2:
                group = "particle" if condition.body[1] in self._substance_instances else "kinematic"
            else:
                assert len(condition.body) == 1, (
                    f"Got invalid parsed initial condition; body length should either be 2 or 1. "
                    f"Got body: {condition.body} for condition: {condition}"
                )
                group = "unary"
            sampling_groups[group].append(condition.body)
            self._object_sampling_conditions[group].append((condition, positive))

            # If the condition involves any non-sampleable object (e.g.: furniture), it's a non-sampleable condition
            # This means that there's no ordering constraint in terms of sampling, because we know the, e.g., furniture
            # object already exists in the scene and is placed, so these specific conditions can be sampled without
            # any dependencies
            if len(self._inroom_object_instances.intersection(set(condition.body))) > 0:
                self._inroom_object_conditions.append((condition, positive))

        # Now, sort each group, ignoring the futures (since they don't get sampled)
        # First handle kinematics, then particles, then unary

        # Start with the non-sampleable objects as the first sampled set, then infer recursively
        cur_batch = self._inroom_object_instances
        while len(cur_batch) > 0:
            next_batch = set()
            for cur_batch_inst in cur_batch:
                inst_batch = set()
                for condition, _ in self._object_sampling_conditions["kinematic"]:
                    if condition.body[1] == cur_batch_inst:
                        inst_batch.add(condition.body[0])
                        next_batch.add(condition.body[0])
                if len(inst_batch) > 0:
                    self._object_sampling_orders["kinematic"].append(inst_batch)
            cur_batch = next_batch

        # Now parse particles -- simply unordered, since particle systems shouldn't impact each other
        self._object_sampling_orders["particle"].append({cond[0] for cond in sampling_groups["particle"]})
        sampled_particle_entities = {cond[1] for cond in sampling_groups["particle"]}

        # Finally, parse unaries -- this is simply unordered, since it is assumed that unary predicates do not
        # affect each other
        self._object_sampling_orders["unary"].append({cond[0] for cond in sampling_groups["unary"]})

        # Aggregate future objects and any unsampleable obj instances
        # Unsampleable obj instances are strictly a superset of future obj instances
        unsampleable_obj_instances = {cond.body[-1] for cond in unsampleable_conditions}
        self._future_obj_instances = {
            cond.body[0] for cond in unsampleable_conditions if isinstance(cond, ObjectStateFuturePredicate)
        }

        nonparticle_entities = set(self._object_scope.keys()) - self._substance_instances

        # Sanity check kinematic objects -- any non-system must be kinematically sampled
        remaining_kinematic_entities = (
            nonparticle_entities
            - unsampleable_obj_instances
            - self._inroom_object_instances
            - set.union(*(self._object_sampling_orders["kinematic"] + [set()]))
        )

        # Possibly remove the agent entity if we're in an empty scene -- i.e.: no kinematic sampling needed for the
        # agent
        if self._scene_model is None:
            remaining_kinematic_entities -= {"agent.n.01_1"}

        if len(remaining_kinematic_entities) != 0:
            return (
                f"Some objects do not have any kinematic condition defined for them in the initial conditions: "
                f"{', '.join(remaining_kinematic_entities)}"
            )

        # Sanity check particle systems -- any non-future system must be sampled as part of particle groups
        remaining_particle_entities = self._substance_instances - unsampleable_obj_instances - sampled_particle_entities
        if len(remaining_particle_entities) != 0:
            return (
                f"Some systems do not have any particle condition defined for them in the initial conditions: "
                f"{', '.join(remaining_particle_entities)}"
            )

    def _build_inroom_object_scope(self):
        """
        Store simulator object options for non-sampleable objects in self.inroom_object_scope
        {
            "living_room": {
                "table1": {
                    "living_room_0": [URDFObject, URDFObject, URDFObject],
                    "living_room_1": [URDFObject]
                },
                "table2": {
                    "living_room_0": [URDFObject, URDFObject],
                    "living_room_1": [URDFObject, URDFObject]
                },
                "chair1": {
                    "living_room_0": [URDFObject],
                    "living_room_1": [URDFObject]
                },
            }
        }
        """
        room_type_to_scene_objs = {}
        for room_type in self._room_type_to_object_instance:
            room_type_to_scene_objs[room_type] = {}
            for obj_inst in self._room_type_to_object_instance[room_type]:
                room_type_to_scene_objs[room_type][obj_inst] = {}
                obj_synset = self._object_instance_to_synset[obj_inst]

                # We allow burners to be used as if they are stoves
                # No need to safeguard check for subtree_substances because inroom objects will never be substances
                categories = OBJECT_TAXONOMY.get_subtree_categories(obj_synset)

                # Grab all models that fully support all abilities for the corresponding category
                valid_models = {
                    cat: set(
                        get_all_object_category_models_with_abilities(
                            cat, OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(cat))
                        )
                    )
                    for cat in categories
                }
                valid_models = {
                    cat: (models if cat not in GOOD_MODELS else models.intersection(GOOD_MODELS[cat]))
                    - BAD_CLOTH_MODELS.get(cat, set())
                    for cat, models in valid_models.items()
                }
                valid_models = {
                    cat: self._filter_model_choices_by_attached_states(models, cat, obj_inst)
                    for cat, models in valid_models.items()
                }
                room_insts = (
                    [None]
                    if self._scene_model is None
                    else self._env.scene.seg_map.room_sem_name_to_ins_name[room_type]
                )
                for room_inst in room_insts:
                    # A list of scene objects that satisfy the requested categories
                    room_objs = self._env.scene.object_registry("in_rooms", room_inst, default_val=[])
                    scene_objs = [
                        obj
                        for obj in room_objs
                        if obj.category in categories and obj.model in valid_models[obj.category]
                    ]

                    if len(scene_objs) != 0:
                        room_type_to_scene_objs[room_type][obj_inst][room_inst] = scene_objs

        error_msg = self._consolidate_room_instance(room_type_to_scene_objs, "initial_pre-sampling")
        if error_msg:
            return error_msg
        self._inroom_object_scope = room_type_to_scene_objs

    def _filter_object_scope(self, input_object_scope, conditions, condition_type):
        """
        Filters the object scope based on given @input_object_scope, @conditions, and @condition_type

        Args:
            input_object_scope (dict):
            conditions (list): List of conditions to filter scope with, where each list entry is
                a tuple of (condition, positive), where @positive is True if the condition has a positive
                evaluation.
            condition_type (str): What type of condition to sample, e.g., "initial"

        Returns:
            2-tuple:

                - dict: Filtered object scope
                - list of str: The name of children object(s) that have the highest proportion of kinematic sampling
                    failures
        """
        filtered_object_scope = {}
        # Maps child obj name (SCOPE name) to parent obj name (OBJECT name) to T / F,
        # ie: if the kinematic relationship was sampled successfully
        problematic_objs = defaultdict(dict)
        for room_type in input_object_scope:
            filtered_object_scope[room_type] = {}
            for scene_obj in input_object_scope[room_type]:
                filtered_object_scope[room_type][scene_obj] = {}
                for room_inst in input_object_scope[room_type][scene_obj]:
                    # These are a list of candidate simulator objects that need sampling test
                    for obj in input_object_scope[room_type][scene_obj][room_inst]:
                        # Temporarily set object_scope to point to this candidate object
                        self._object_scope[scene_obj] = BDDLEntity(bddl_inst=scene_obj, entity=obj)

                        success = True
                        # If this candidate object is not involved in any conditions,
                        # success will be True by default and this object will qualify
                        parent_obj_name = obj.name
                        conditions_to_sample = []
                        for condition, positive in conditions:
                            # Sample positive kinematic conditions that involve this candidate object
                            if (
                                condition.STATE_NAME in KINEMATIC_STATES_BDDL
                                and positive
                                and scene_obj in condition.body
                            ):
                                child_scope_name = condition.body[0]
                                entity = self._object_scope[child_scope_name]
                                conditions_to_sample.append((condition, positive, entity, child_scope_name))

                        # If we're sampling kinematics, sort children based on (a) whether they are cloth or not, and
                        # then (b) their AABB, so that first all rigid objects are sampled before all cloth objects,
                        # and within each group the larger objects are sampled first. This is needed because rigid
                        # objects currently don't detect collisions with cloth objects (rigid_obj.states[ContactBodies]
                        # is empty even when a cloth object is in contact with it).
                        rigid_conditions = [c for c in conditions_to_sample if c[2].prim_type != PrimType.CLOTH]
                        cloth_conditions = [c for c in conditions_to_sample if c[2].prim_type == PrimType.CLOTH]
                        conditions_to_sample = list(
                            reversed(sorted(rigid_conditions, key=lambda x: th.prod(x[2].aabb_extent)))
                        ) + list(reversed(sorted(cloth_conditions, key=lambda x: th.prod(x[2].aabb_extent))))

                        # Sample!
                        for condition, positive, entity, child_scope_name in conditions_to_sample:
                            kwargs = dict()
                            # Reset if we're sampling a kinematic state
                            if condition.STATE_NAME in {"inside", "ontop", "under"}:
                                kwargs["reset_before_sampling"] = True
                            elif condition.STATE_NAME in {"attached"}:
                                kwargs["bypass_alignment_checking"] = True
                                kwargs["check_physics_stability"] = True
                                kwargs["can_joint_break"] = False
                            success = condition.sample(binary_state=positive, **kwargs)
                            log_msg = " ".join(
                                [
                                    f"{condition_type} kinematic condition sampling",
                                    room_type,
                                    scene_obj,
                                    str(room_inst),
                                    parent_obj_name,
                                    condition.STATE_NAME,
                                    str(condition.body),
                                    str(success),
                                ]
                            )
                            log.info(log_msg)

                            # Record the result for the child object
                            assert (
                                parent_obj_name not in problematic_objs[child_scope_name]
                            ), f"Multiple kinematic relationships attempted for pair {condition.body}"
                            problematic_objs[child_scope_name][parent_obj_name] = success
                            # If any condition fails for this candidate object, skip
                            if not success:
                                break

                        # If this candidate object fails, move on to the next candidate object
                        if not success:
                            continue

                        if room_inst not in filtered_object_scope[room_type][scene_obj]:
                            filtered_object_scope[room_type][scene_obj][room_inst] = []
                        filtered_object_scope[room_type][scene_obj][room_inst].append(obj)

        # Compute most problematic objects
        if len(problematic_objs) == 0:
            max_problematic_objs = []
        else:
            problematic_objs_by_proportion = defaultdict(list)
            for child_scope_name, parent_obj_names in problematic_objs.items():
                problematic_objs_by_proportion[
                    th.mean(th.tensor(list(parent_obj_names.values()), dtype=th.float32)).item()
                ].append(child_scope_name)
            max_problematic_objs = problematic_objs_by_proportion[min(problematic_objs_by_proportion.keys())]

        return filtered_object_scope, max_problematic_objs

    def _consolidate_room_instance(self, filtered_object_scope, condition_type):
        """
        Consolidates room instances

        Args:
            filtered_object_scope (dict): Filtered object scope
            condition_type (str): What type of condition to sample, e.g., "initial"

        Returns:
            None or str: Error message, if any
        """
        for room_type in filtered_object_scope:
            # For each room_type, filter in room_inst that has successful
            # sampling options for all obj_inst in this room_type
            room_inst_satisfied = set.intersection(
                *[
                    set(filtered_object_scope[room_type][obj_inst].keys())
                    for obj_inst in filtered_object_scope[room_type]
                ]
            )

            if len(room_inst_satisfied) == 0:
                error_msg = "{}: Room type [{}] of scene [{}] does not contain or cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
                    condition_type, room_type, self._scene_model
                )
                for obj_inst in filtered_object_scope[room_type]:
                    error_msg += (
                        "{}: ".format(obj_inst) + ", ".join(filtered_object_scope[room_type][obj_inst].keys()) + "\n"
                    )

                return error_msg

            for obj_inst in filtered_object_scope[room_type]:
                filtered_object_scope[room_type][obj_inst] = {
                    key: val
                    for key, val in filtered_object_scope[room_type][obj_inst].items()
                    if key in room_inst_satisfied
                }

    def _filter_model_choices_by_attached_states(self, model_choices, category, obj_inst):
        # If obj_inst is a child object that depends on a parent object that has been imported or exists in the scene,
        # we filter in only models that match the parent object's attachment metalinks.
        if obj_inst in self._attached_objects:
            parent_insts = self._attached_objects[obj_inst]
            parent_objects = []
            for parent_inst in parent_insts:
                # If parent_inst is not an inroom object, it must be a non-sampleable object that has already been imported.
                # Grab it from the object_scope
                if parent_inst not in self._inroom_object_instances:
                    assert self._object_scope[parent_inst] is not None
                    parent_objects.append([self._object_scope[parent_inst].wrapped_obj])
                # If parent_inst is an inroom object, it can refer to multiple objects in the scene in different rooms.
                # We gather all of them and require that the model choice supports attachment to at least one of them.
                else:
                    for _, parent_inst_to_parent_objs in self._inroom_object_scope.items():
                        if parent_inst in parent_inst_to_parent_objs:
                            parent_objects.append(sum(parent_inst_to_parent_objs[parent_inst].values(), []))

            # Helper function to check if a child object can attach to a parent object
            def can_attach(child_attachment_links, parent_attachment_links):
                for child_link_name in child_attachment_links:
                    child_category = child_link_name.split("_")[1]
                    if child_category.endswith("F"):
                        continue
                    assert child_category.endswith("M")
                    parent_category = child_category[:-1] + "F"
                    for parent_link_name in parent_attachment_links:
                        if parent_category in parent_link_name:
                            return True
                return False

            # Filter out models that don't support the attached states
            new_model_choices = set()
            for model_choice in model_choices:
                child_attachment_links = get_attachment_metalinks(category, model_choice)
                # The child model choice needs to be able to attach to all parent instances.
                # For in-room parent instances, there might be multiple parent objects (e.g. different wall nails),
                # and the child object needs to be able to attach to at least one of them.
                if all(
                    any(
                        can_attach(
                            child_attachment_links, get_attachment_metalinks(parent_obj.category, parent_obj.model)
                        )
                        for parent_obj in parent_objs_per_inst
                    )
                    for parent_objs_per_inst in parent_objects
                ):
                    new_model_choices.add(model_choice)

            return new_model_choices

        # If obj_inst is a parent object that other objects depend on, we filter in only models that have at least some
        # attachment links.
        elif any(obj_inst in parents for parents in self._attached_objects.values()):
            # Filter out models that don't support the attached states
            new_model_choices = set()
            for model_choice in model_choices:
                if len(get_attachment_metalinks(category, model_choice)) > 0:
                    new_model_choices.add(model_choice)
            return new_model_choices

        # If neither of the above cases apply, we don't need to filter the model choices
        else:
            return model_choices

    def _import_sampleable_objects(self):
        """
        Import all objects that can be sampled
        """
        assert og.sim.is_stopped(), "Simulator should be stopped when importing sampleable objects"

        # Move the robot object frame to a far away location, similar to other newly imported objects below
        self._agent.set_position_orientation(
            position=th.tensor([300, 300, 300], dtype=th.float32), orientation=th.tensor([0, 0, 0, 1], dtype=th.float32)
        )

        self._sampled_objects = set()
        num_new_obj = 0
        # Only populate self.object_scope for sampleable objects
        available_categories = set(get_all_object_categories())

        # Attached states introduce dependencies among objects during import time.
        # For example, when importing a child object instance, we need to make sure the imported model can be attached
        # to the parent object instance. We sort the object instances such that parent object instances are imported
        # before child object instances.
        dependencies = {key: self._attached_objects.get(key, {}) for key in self._object_instance_to_synset.keys()}
        for obj_inst in list(reversed(list(nx.algorithms.topological_sort(nx.DiGraph(dependencies))))):
            obj_synset = self._object_instance_to_synset[obj_inst]

            # Don't populate agent
            if obj_synset == "agent.n.01":
                continue

            # Populate based on whether it's a substance or not
            if is_substance_synset(obj_synset):
                assert len(self._activity_conditions.parsed_objects[obj_synset]) == 1, "Systems are singletons"
                obj_inst = self._activity_conditions.parsed_objects[obj_synset][0]
                system_name = OBJECT_TAXONOMY.get_subtree_substances(obj_synset)[0]
                self._object_scope[obj_inst] = BDDLEntity(
                    bddl_inst=obj_inst,
                    entity=(
                        None if obj_inst in self._future_obj_instances else self._env.scene.get_system(system_name)
                    ),
                )
            else:
                valid_categories = set(OBJECT_TAXONOMY.get_subtree_categories(obj_synset))
                categories = list(valid_categories.intersection(available_categories))
                if len(categories) == 0:
                    return (
                        f"None of the following categories could be found in the dataset for synset {obj_synset}: "
                        f"{valid_categories}"
                    )

                # Don't explicitly sample if future
                if obj_inst in self._future_obj_instances:
                    self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst)
                    continue
                # Don't sample if already in room
                if obj_inst in self._inroom_object_instances:
                    continue

                # Shuffle categories and sample to find a valid model
                random.shuffle(categories)
                model_choices = set()
                for category in categories:
                    # Get all available models that support all of its synset abilities
                    model_choices = set(
                        get_all_object_category_models_with_abilities(
                            category=category,
                            abilities=OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(category)),
                        )
                    )
                    model_choices = (
                        model_choices
                        if category not in GOOD_MODELS
                        else model_choices.intersection(GOOD_MODELS[category])
                    )
                    model_choices -= BAD_CLOTH_MODELS.get(category, set())
                    model_choices = self._filter_model_choices_by_attached_states(model_choices, category, obj_inst)
                    if len(model_choices) > 0:
                        break

                if len(model_choices) == 0:
                    # We failed to find ANY valid model across ALL valid categories
                    return f"Missing valid object models for all categories: {categories}"

                # Randomly select an object model
                model = random.choice(list(model_choices))

                # Potentially add additional kwargs
                obj_kwargs = dict()

                obj_kwargs["bounding_box"] = GOOD_BBOXES.get(category, dict()).get(model, None)

                # create the object
                simulator_obj = DatasetObject(
                    name=f"{category}_{len(self._env.scene.objects)}",
                    category=category,
                    model=model,
                    prim_type=(
                        PrimType.CLOTH if "cloth" in OBJECT_TAXONOMY.get_abilities(obj_synset) else PrimType.RIGID
                    ),
                    **obj_kwargs,
                )
                num_new_obj += 1

                # Load the object into the simulator
                self._env.scene.add_object(simulator_obj)

                # Set these objects to be far-away locations
                simulator_obj.set_position_orientation(
                    position=th.tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0
                )

                self._sampled_objects.add(simulator_obj)
                self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=simulator_obj)

        og.sim.play()
        og.sim.stop()

    def _sample_initial_conditions(self):
        """
        Sample initial conditions

        Returns:
            None or str: If successful, returns None. Otherwise, returns an error message
        """
        error_msg, self._inroom_object_scope_filtered_initial = self._sample_conditions(
            self._inroom_object_scope, self._inroom_object_conditions, "initial"
        )
        return error_msg

    def _sample_goal_conditions(self):
        """
        Sample goal conditions

        Returns:
            None or str: If successful, returns None. Otherwise, returns an error message
        """
        activity_goal_conditions = get_goal_conditions(self._activity_conditions, self._backend, self._object_scope)
        ground_goal_state_options = get_ground_goal_state_options(
            self._activity_conditions, self._backend, self._object_scope, activity_goal_conditions
        )
        num_options = ground_goal_state_options.size(0)
        ground_goal_state_options = ground_goal_state_options[random.sample(range(num_options), num_options)]
        log.debug(("number of ground_goal_state_options", len(ground_goal_state_options)))
        num_goal_condition_set_to_test = 10

        goal_condition_success = False
        # Try to fulfill different set of ground goal conditions (maximum num_goal_condition_set_to_test)
        for goal_condition_set in ground_goal_state_options[:num_goal_condition_set_to_test]:
            goal_condition_processed = []
            for condition in goal_condition_set:
                condition, positive = process_single_condition(condition)
                if condition is None:
                    continue
                goal_condition_processed.append((condition, positive))

            error_msg, _ = self._sample_conditions(
                self._inroom_object_scope_filtered_initial, goal_condition_processed, "goal"
            )
            if not error_msg:
                # if one set of goal conditions (and initial conditions) are satisfied, sampling is successful
                goal_condition_success = True
                break

        if not goal_condition_success:
            return error_msg

    def _sample_initial_conditions_final(self):
        """
        Sample final initial conditions

        Returns:
            None or str: If successful, returns None. Otherwise, returns an error message
        """
        # Sample kinematics first, then particle states, then unary states
        state = og.sim.dump_state(serialized=False)
        for group in ("kinematic", "particle", "unary"):
            log.info(f"Sampling {group} states...")
            if len(self._object_sampling_orders[group]) > 0:
                for cur_batch in self._object_sampling_orders[group]:
                    conditions_to_sample = []
                    for condition, positive in self._object_sampling_conditions[group]:
                        # Sample conditions that involve the current batch of objects
                        child_scope_name = condition.body[0]
                        if child_scope_name in cur_batch:
                            entity = self._object_scope[child_scope_name]
                            conditions_to_sample.append((condition, positive, entity, child_scope_name))

                    # If we're sampling kinematics, sort children based on (a) whether they are cloth or not, and then
                    # (b) their AABB, so that first all rigid objects are sampled before cloth objects, and within each
                    # group the larger objects are sampled first
                    if group == "kinematic":
                        rigid_conditions = [c for c in conditions_to_sample if c[2].prim_type != PrimType.CLOTH]
                        cloth_conditions = [c for c in conditions_to_sample if c[2].prim_type == PrimType.CLOTH]
                        conditions_to_sample = list(
                            reversed(sorted(rigid_conditions, key=lambda x: th.prod(x[2].aabb_extent)))
                        ) + list(reversed(sorted(cloth_conditions, key=lambda x: th.prod(x[2].aabb_extent))))

                    # Sample!
                    for condition, positive, entity, child_scope_name in conditions_to_sample:
                        success = False

                        kwargs = dict()
                        # Reset if we're sampling a kinematic state
                        if condition.STATE_NAME in {"inside", "ontop", "under"}:
                            kwargs["reset_before_sampling"] = True
                        elif condition.STATE_NAME in {"attached"}:
                            kwargs["bypass_alignment_checking"] = True
                            kwargs["check_physics_stability"] = True
                            kwargs["can_joint_break"] = False

                        while True:
                            num_trials = 1
                            for _ in range(num_trials):
                                success = condition.sample(binary_state=positive, **kwargs)
                                if success:
                                    # Update state
                                    state = og.sim.dump_state(serialized=False)
                                    break
                            if success:
                                # After the final round of kinematic sampling, we assign in_rooms to newly imported objects
                                if group == "kinematic":
                                    parent = self._object_scope[condition.body[1]]
                                    entity.in_rooms = deepcopy(parent.in_rooms)

                                # Can terminate immediately
                                break

                            # Can't re-sample non-kinematics or rescale cloth or agent, so in
                            # those cases terminate immediately
                            if (
                                group != "kinematic"
                                or condition.STATE_NAME == "attached"
                                or "agent" in child_scope_name
                                or entity.prim_type == PrimType.CLOTH
                            ):
                                break

                            # If any scales are equal or less than the lower threshold, terminate immediately
                            new_scale = entity.scale - m.DYNAMIC_SCALE_INCREMENT
                            if th.any(new_scale < m.MIN_DYNAMIC_SCALE):
                                break

                            # Re-scale and re-attempt
                            # Re-scaling is not respected unless sim cycle occurs
                            og.sim.stop()
                            entity.scale = new_scale
                            log.info(
                                f"Kinematic sampling {condition.STATE_NAME} {condition.body} failed, rescaling obj: {child_scope_name} to {entity.scale}"
                            )
                            og.sim.play()
                            og.sim.load_state(state, serialized=False)
                            og.sim.step_physics()
                        if not success:
                            # Update object registry because we just assigned in_rooms to newly imported objects
                            self._env.scene.object_registry.update(keys=["in_rooms"])
                            return f"Sampleable object conditions failed: {condition.STATE_NAME} {condition.body}"

        # Update object registry because we just assigned in_rooms to newly imported objects
        self._env.scene.object_registry.update(keys=["in_rooms"])

        # One more sim step to make sure the object states are propagated correctly
        # E.g. after sampling Filled.set_value(True), Filled.get_value() will become True only after one step
        og.sim.step()

    def _sample_conditions(self, input_object_scope, conditions, condition_type):
        """
        Sample conditions

        Args:
            input_object_scope (dict): Nested object scope to filter, mapping room type to object instance name
                to room instance to a list of candidate scene objects
            conditions (list): List of conditions to filter scope with, where each list entry is
                a tuple of (condition, positive), where @positive is True if the condition has a positive
                evaluation.
            condition_type (str): What type of condition to sample, e.g., "initial"

        Returns:
            2-tuple:
                - None or str: None if successful, otherwise an error message
                - None or dict: Filtered object scope if available, otherwise None
        """
        error_msg, problematic_objs = "", []
        while not any(
            th.any(self._object_scope[obj_inst].scale < m.MIN_DYNAMIC_SCALE).item() for obj_inst in problematic_objs
        ):
            filtered_object_scope, problematic_objs = self._filter_object_scope(
                input_object_scope, conditions, condition_type
            )
            error_msg = self._consolidate_room_instance(filtered_object_scope, condition_type)
            if error_msg is None:
                break
            # Re-scaling is not respected unless sim cycle occurs
            og.sim.stop()
            for obj_inst in problematic_objs:
                obj = self._object_scope[obj_inst]
                # If the object's initial condition is attachment, or it's agent or cloth, we can't / shouldn't scale
                # down, so play again and then terminate immediately
                if obj_inst in self._attached_objects or "agent" in obj_inst or obj.prim_type == PrimType.CLOTH:
                    og.sim.play()
                    return error_msg, None
                assert th.all(obj.scale > m.DYNAMIC_SCALE_INCREMENT)
                obj.scale -= m.DYNAMIC_SCALE_INCREMENT
            og.sim.play()

        if error_msg:
            return error_msg, None
        return self._maximum_bipartite_matching(filtered_object_scope, condition_type), filtered_object_scope

    def _maximum_bipartite_matching(self, filtered_object_scope, condition_type):
        """
        Matches each object instance from @filtered_object_scope to a distinct simulator object,
        trying candidate room instances until a complete matching is found

        Args:
            filtered_object_scope (dict): Filtered object scope
            condition_type (str): What type of condition to sample, e.g., "initial"

        Returns:
            None or str: If successful, returns None. Otherwise, returns an error message
        """
        # For each room instance, perform maximum bipartite matching between object instances in scope and simulator objects
        # Left nodes: a list of object instances in scope
        # Right nodes: a list of simulator objects
        # Edges: if the simulator object can support the sampling requirement of this object instance
        for room_type in filtered_object_scope:
            # The same room instances will be shared across all scene obj in a given room type
            some_obj = list(filtered_object_scope[room_type].keys())[0]
            room_insts = list(filtered_object_scope[room_type][some_obj].keys())
            success = False
            # Loop through each room instance
            for room_inst in room_insts:
                graph = nx.Graph()
                # For this given room instance, gather the mapping from obj instance to a list of simulator objects
                obj_inst_to_obj_per_room_inst = {}
                for obj_inst in filtered_object_scope[room_type]:
                    obj_inst_to_obj_per_room_inst[obj_inst] = filtered_object_scope[room_type][obj_inst][room_inst]
                top_nodes = []
                log_msg = "MBM for room instance [{}]".format(room_inst)
                log.debug((log_msg))
                for obj_inst in obj_inst_to_obj_per_room_inst:
                    for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
                        # Create an edge between obj instance and each of the simulator obj that supports sampling
                        graph.add_edge(obj_inst, obj)
                        log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
                        log.debug((log_msg))
                        top_nodes.append(obj_inst)
                # Need to provide top_nodes that contain all nodes in one bipartite node set
                # The matches will have two items for each match (e.g. A -> B, B -> A)
                matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
                if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
                    log.debug(("Object scope finalized:"))
                    for obj_inst, obj in matches.items():
                        if obj_inst in obj_inst_to_obj_per_room_inst:
                            self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=obj)
                            log.debug((obj_inst, obj.name))
                    success = True
                    break
            if not success:
                return "{}: Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format(
                    condition_type, room_type, self._scene_model
                )
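
The maximum-bipartite-matching step above is easier to follow in isolation. The snippet below is purely illustrative and not part of bddl_utils: the instance and simulator object names are hypothetical, and it only shows how networkx's maximum_matching assigns each object instance in scope to a distinct simulator object, and why a complete assignment corresponds to a matching dictionary with twice as many entries as object instances.

import networkx as nx

# Hypothetical mapping from BDDL object instances to candidate simulator object names
obj_inst_to_candidates = {
    "plate.n.04_1": ["plate_01", "plate_02"],
    "plate.n.04_2": ["plate_02"],
}

graph = nx.Graph()
top_nodes = []
for obj_inst, candidates in obj_inst_to_candidates.items():
    for obj in candidates:
        # One edge per (instance, candidate) pair that could satisfy the sampling requirement
        graph.add_edge(obj_inst, obj)
        top_nodes.append(obj_inst)

# maximum_matching reports each match in both directions (A -> B and B -> A),
# so a complete assignment has 2 * len(obj_inst_to_candidates) entries
matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
assert len(matches) == 2 * len(obj_inst_to_candidates)
assignment = {inst: obj for inst, obj in matches.items() if inst in obj_inst_to_candidates}
print(assignment)  # e.g. {"plate.n.04_1": "plate_01", "plate.n.04_2": "plate_02"}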

sample(validate_goal=False)

Run sampling for this BEHAVIOR task

Parameters:

    validate_goal (bool, default: False): Whether the goal should be validated or not

Returns:

    2-tuple:
        - bool: Whether sampling was successful or not
        - None or str: None if successful, otherwise the associated error message
Source code in omnigibson/utils/bddl_utils.py
def sample(self, validate_goal=False):
    """
    Run sampling for this BEHAVIOR task

    Args:
        validate_goal (bool): Whether the goal should be validated or not

    Returns:
        2-tuple:
            - bool: Whether sampling was successful or not
            - None or str: None if successful, otherwise the associated error message
    """
    log.info("Sampling task...")
    # Reject scenes with missing non-sampleable objects
    # Populate object_scope with sampleable objects and the robot
    accept_scene, feedback = self._prepare_scene_for_sampling()
    if not accept_scene:
        return accept_scene, feedback
    # Sample objects to satisfy initial conditions
    accept_scene, feedback = self._sample_all_conditions(validate_goal=validate_goal)
    if not accept_scene:
        return accept_scene, feedback

    log.info("Sampling succeeded!")

    return True, None
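
A hedged usage sketch, assuming `sampler` refers to an instance of the sampler class documented above, already constructed against a loaded scene:

# Illustrative only -- `sampler` is a hypothetical handle to the sampler documented here
success, error_msg = sampler.sample(validate_goal=True)
if not success:
    print(f"Task sampling failed: {error_msg}")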

process_single_condition(condition)

Processes a single BDDL condition

Parameters:

    condition (Condition, required): Condition to process

Returns:

    2-tuple:
        - Expression: Condition's expression
        - bool: Whether this evaluated condition is positive or negative
Source code in omnigibson/utils/bddl_utils.py
def process_single_condition(condition):
    """
    Processes a single BDDL condition

    Args:
        condition (Condition): Condition to process

    Returns:
        2-tuple:
            - Expression: Condition's expression
            - bool: Whether this evaluated condition is positive or negative
    """
    if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula):
        log.debug(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
        return None, None

    if isinstance(condition.children[0], Negation):
        condition = condition.children[0].children[0]
        positive = False
    else:
        condition = condition.children[0]
        positive = True

    return condition, positive
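
A minimal sketch of how the returned pair is typically consumed; the `parsed_conditions` list is hypothetical, while the `sample(binary_state=...)` call mirrors the pattern used in the sampler source above:

# Illustrative only: split each parsed BDDL condition into its atomic expression
# and polarity before attempting to sample it
for cond in parsed_conditions:  # hypothetical list of Condition objects
    expression, positive = process_single_condition(cond)
    if expression is None:
        continue  # not a negation or atomic formula -- nothing to sample
    success = expression.sample(binary_state=positive)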

translate_bddl_recipe_to_og_recipe(name, input_synsets, output_synsets, input_states=None, output_states=None, fillable_synsets=None, heatsource_synsets=None, timesteps=None)

Translate a BDDL recipe to an OG recipe.

Parameters:

    name (str): Name of the recipe
    input_synsets (dict): Maps synsets to number of instances required for the recipe
    output_synsets (dict): Maps synsets to number of instances to be spawned in the container when the recipe executes
    input_states (dict or None): Maps input synsets to states that must be satisfied for the recipe to execute, or None if no states are required
    output_states (dict or None): Maps output synsets to states that should be set when spawned when the recipe executes, or None if no states are required
    fillable_synsets (None or set of str): If specified, set of fillable synsets which are allowed for this recipe. If None, any fillable is allowed
    heatsource_synsets (None or set of str): If specified, set of heatsource synsets which are allowed for this recipe. If None, any heatsource is allowed
    timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None, it will be set to 1, i.e., instantaneous execution

Source code in omnigibson/utils/bddl_utils.py
def translate_bddl_recipe_to_og_recipe(
    name,
    input_synsets,
    output_synsets,
    input_states=None,
    output_states=None,
    fillable_synsets=None,
    heatsource_synsets=None,
    timesteps=None,
):
    """
    Translate a BDDL recipe to an OG recipe.
    Args:
        name (str): Name of the recipe
        input_synsets (dict): Maps synsets to number of instances required for the recipe
        output_synsets (dict): Maps synsets to number of instances to be spawned in the container when the recipe executes
        input_states (dict or None): Maps input synsets to states that must be satisfied for the recipe to execute,
            or None if no states are required
        output_states (dict or None): Maps output synsets to states that should be set when spawned when the recipe executes,
            or None if no states are required
        fillable_synsets (None or set of str): If specified, set of fillable synsets which are allowed for this recipe.
            If None, any fillable is allowed
        heatsource_synsets (None or set of str): If specified, set of heatsource synsets which are allowed for this recipe.
            If None, any heatsource is allowed
        timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None,
            it will be set to be 1, i.e.: instantaneous execution
    """
    og_recipe = {
        "name": name,
        # Maps object categories to number of instances required for the recipe
        "input_objects": dict(),
        # List of system names required for the recipe
        "input_systems": list(),
        # Maps object categories to number of instances to be spawned in the container when the recipe executes
        "output_objects": dict(),
        # List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
        "output_systems": list(),
        # Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
        "input_states": defaultdict(lambda: defaultdict(list)),
        # Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned
        "output_states": defaultdict(lambda: defaultdict(list)),
        # Set of fillable categories which are allowed for this recipe
        "fillable_categories": None,
        # Set of heatsource categories which are allowed for this recipe
        "heatsource_categories": None,
        # Number of subsequent heating steps required for the recipe to execute
        "timesteps": timesteps if timesteps is not None else 1,
    }

    _populate_input_output_objects_systems(
        og_recipe=og_recipe, input_synsets=input_synsets, output_synsets=output_synsets
    )
    _populate_input_output_states(og_recipe=og_recipe, input_states=input_states, output_states=output_states)
    _populate_filter_categories(og_recipe=og_recipe, filter_name="fillable", synsets=fillable_synsets)
    _populate_filter_categories(og_recipe=og_recipe, filter_name="heatsource", synsets=heatsource_synsets)

    return og_recipe
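
A hedged example of calling the translator; the recipe name, synsets, and counts below are hypothetical placeholders chosen only to show the shape of the returned dictionary:

# Illustrative only -- synsets and counts are placeholders, not a real recipe
og_recipe = translate_bddl_recipe_to_og_recipe(
    name="example_recipe",
    input_synsets={"water.n.06": 1, "flour.n.01": 1},
    output_synsets={"dough.n.01": 1},
    timesteps=5,
)
# og_recipe["input_objects"] / og_recipe["output_objects"]: category -> instance count
# og_recipe["input_systems"] / og_recipe["output_systems"]: lists of system names
# og_recipe["timesteps"]: 5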

translate_bddl_washer_rule_to_og_washer_rule(conditions)

Translate BDDL washer rule to OG washer rule.

Parameters:

    conditions (dict, required): Dictionary mapping the synset of a ParticleSystem (str) to None or a list of
        ParticleSystem synsets (str). None means "never", an empty list means "always", and a non-empty list
        means that at least one of the listed systems must be present in the washer for the key system to be removed.
        E.g. "rust.n.01" -> None: "never remove rust.n.01 from the washer"
        E.g. "dust.n.01" -> []: "always remove dust.n.01 from the washer"
        E.g. "cooking_oil.n.01" -> ["sodium_carbonate.n.01", "vinegar.n.01"]: "remove cooking_oil.n.01 from the
            washer if either sodium_carbonate.n.01 or vinegar.n.01 is present"
        For keys not present in the dictionary, the default is []: "always remove"

Returns:

    dict: Dictionary mapping the system name (str) to None or a list of system names (str). None means "never",
        an empty list means "always", and a non-empty list means that at least one of the listed systems must be
        present in the washer for the key system to be removed.

Source code in omnigibson/utils/bddl_utils.py
def translate_bddl_washer_rule_to_og_washer_rule(conditions):
    """
    Translate BDDL washer rule to OG washer rule.

    Args:
        conditions (dict): Dictionary mapping the synset of ParticleSystem (str) to None or list of synsets of
            ParticleSystem (str). None represents "never", empty list represents "always", or non-empty list represents
            at least one of the systems in the list needs to be present in the washer for the key system to be removed.
            E.g. "rust.n.01" -> None: "never remove rust.n.01 from the washer"
            E.g. "dust.n.01" -> []: "always remove dust.n.01 from the washer"
            E.g. "cooking_oil.n.01" -> ["sodium_carbonate.n.01", "vinegar.n.01"]: "remove cooking_oil.n.01 from the
            washer if either sodium_carbonate.n.01 or vinegar.n.01 is present"
            For keys not present in the dictionary, the default is []: "always remove"
    Returns:
        dict: Dictionary mapping the system name (str) to None or list of system names (str). None represents "never",
            empty list represents "always", or non-empty list represents at least one of the systems in the list needs
            to be present in the washer for the key system to be removed.
    """
    og_washer_rule = dict()
    for solute, solvents in conditions.items():
        assert OBJECT_TAXONOMY.is_leaf(solute), f"Synset {solute} must be a leaf node in the taxonomy!"
        assert is_substance_synset(solute), f"Synset {solute} must be a substance synset!"
        solute_name = get_system_name_by_synset(solute)
        if solvents is None:
            og_washer_rule[solute_name] = None
        else:
            solvent_names = []
            for solvent in solvents:
                assert OBJECT_TAXONOMY.is_leaf(solvent), f"Synset {solvent} must be a leaf node in the taxonomy!"
                assert is_substance_synset(solvent), f"Synset {solvent} must be a substance synset!"
                solvent_name = get_system_name_by_synset(solvent)
                solvent_names.append(solvent_name)
            og_washer_rule[solute_name] = solvent_names
    return og_washer_rule
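
A short example built from the synsets already listed in the docstring above; it is illustrative only, and the exact system names in the returned dictionary depend on the taxonomy lookup:

# Conditions taken from the docstring examples above
conditions = {
    "rust.n.01": None,                                               # never removed
    "dust.n.01": [],                                                 # always removed
    "cooking_oil.n.01": ["sodium_carbonate.n.01", "vinegar.n.01"],   # removed if either solvent is present
}
og_washer_rule = translate_bddl_washer_rule_to_og_washer_rule(conditions)
# og_washer_rule maps the corresponding system names to None / [] / a list of system names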