summaryrefslogtreecommitdiff
path: root/keystone/identity/core.py
blob: 5168bf01fdf30cbdd56244f52e1269fdcfb89b36 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Main entry point into the Identity service."""

import functools
import operator
import os
import threading
import uuid

from oslo_config import cfg
from oslo_log import log
from pycadf import reason

from keystone import assignment  # TODO(lbragstad): Decouple this dependency
from keystone.common import cache
from keystone.common import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common.validation import validators
import keystone.conf
from keystone import exception
from keystone.i18n import _, _LW
from keystone.identity.mapping_backends import mapping
from keystone import notifications
from oslo_utils import timeutils


# Global keystone configuration and a module-level logger.
CONF = keystone.conf.CONF

LOG = log.getLogger(__name__)

# Memoization decorator backed by the 'identity' cache config group.
MEMOIZE = cache.get_memoization_decorator(group='identity')

# The public-ID <-> local-ID mapping gets its own cache region so it can be
# managed independently of the rest of the identity cache.
ID_MAPPING_REGION = cache.create_region(name='id mapping')
MEMOIZE_ID_MAPPING = cache.get_memoization_decorator(group='identity',
                                                     region=ID_MAPPING_REGION)

# Domain-specific configuration files must be named
# keystone.<domain_name>.conf (see DomainConfigs).
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'

# The number of times we will attempt to register a domain to use the SQL
# driver, if we find that another process is in the middle of registering or
# releasing at the same time as us.
REGISTRATION_ATTEMPTS = 10

# Config Registration Types
SQL_DRIVER = 'SQL'


@dependency.requires('domain_config_api', 'resource_api')
class DomainConfigs(dict):
    """Discover, store and provide access to domain specific configs.

    The setup_domain_drivers() call will be made via the wrapper from
    the first call to any driver function handled by this manager.

    Domain specific configurations are only supported for the identity backend
    and the individual configurations are either specified in the resource
    database or in individual domain configuration files, depending on the
    setting of the 'domain_configurations_from_database' config option.

    The result will be that for each domain with a specific configuration,
    this class will hold a reference to a ConfigOpts and driver object that
    the identity manager and driver can use.

    """

    # Set True once setup_domain_drivers() has completed.
    configured = False
    # The standard (non-domain-specific) driver, stored by
    # setup_domain_drivers().
    driver = None
    # Set True once a domain-specific config using the SQL driver has been
    # seen; used to enforce at most one SQL-backed identity config.
    _any_sql = False
    # Guards the lazy one-time setup performed by the @domains_configured
    # wrapper.
    lock = threading.Lock()

    def _load_driver(self, domain_config):
        """Instantiate the identity driver named by a domain's ConfigOpts."""
        return manager.load_driver(Manager.driver_namespace,
                                   domain_config['cfg'].identity.driver,
                                   domain_config['cfg'])

    def _load_config_from_file(self, resource_api, file_list, domain_name):
        """Load a domain-specific config and driver from config files."""

        def _assert_no_more_than_one_sql_driver(new_config, config_file):
            """Ensure there is no more than one sql driver.

            Check to see if the addition of the driver in this new config
            would cause there to be more than one sql driver.

            """
            if (new_config['driver'].is_sql and
                    (self.driver.is_sql or self._any_sql)):
                # The addition of this driver would cause us to have more than
                # one sql driver, so raise an exception.
                raise exception.MultipleSQLDriversInConfig(source=config_file)
            self._any_sql = self._any_sql or new_config['driver'].is_sql

        try:
            domain_ref = resource_api.get_domain_by_name(domain_name)
        except exception.DomainNotFound:
            # The file name encodes a domain that doesn't exist; skip it.
            LOG.warning(
                _LW('Invalid domain name (%s) found in config file name'),
                domain_name)
            return

        # Create a new entry in the domain config dict, which contains
        # a new instance of both the conf environment and driver using
        # options defined in this set of config files.  Later, when we
        # service calls via this Manager, we'll index via this domain
        # config dict to make sure we call the right driver
        domain_config = {}
        domain_config['cfg'] = cfg.ConfigOpts()
        keystone.conf.configure(conf=domain_config['cfg'])
        domain_config['cfg'](args=[], project='keystone',
                             default_config_files=file_list,
                             default_config_dirs=[])
        domain_config['driver'] = self._load_driver(domain_config)
        _assert_no_more_than_one_sql_driver(domain_config, file_list)
        self[domain_ref['id']] = domain_config

    def _setup_domain_drivers_from_files(self, standard_driver, resource_api):
        """Read the domain specific configuration files and load the drivers.

        Domain configuration files are stored in the domain config directory,
        and must be named of the form:

        keystone.<domain_name>.conf

        For each file, call the load config method where the domain_name
        will be turned into a domain_id and then:

        - Create a new config structure, adding in the specific additional
          options defined in this config file
        - Initialise a new instance of the required driver with this new config

        """
        conf_dir = CONF.identity.domain_config_dir
        if not os.path.exists(conf_dir):
            LOG.warning(_LW('Unable to locate domain config directory: %s'),
                        conf_dir)
            return

        for r, d, f in os.walk(conf_dir):
            for fname in f:
                if (fname.startswith(DOMAIN_CONF_FHEAD) and
                        fname.endswith(DOMAIN_CONF_FTAIL)):
                    # Fewer than two dots means there is no domain name
                    # between the head and tail (e.g. plain 'keystone.conf').
                    if fname.count('.') >= 2:
                        self._load_config_from_file(
                            resource_api, [os.path.join(r, fname)],
                            fname[len(DOMAIN_CONF_FHEAD):
                                  -len(DOMAIN_CONF_FTAIL)])
                    else:
                        LOG.debug(('Ignoring file (%s) while scanning domain '
                                   'config directory'),
                                  fname)

    def _load_config_from_database(self, domain_id, specific_config):
        """Build a domain-specific config and driver from database options."""

        def _assert_no_more_than_one_sql_driver(domain_id, new_config):
            """Ensure adding driver doesn't push us over the limit of 1.

            The checks we make in this method need to take into account that
            we may be in a multiple process configuration and ensure that
            any race conditions are avoided.

            """
            if not new_config['driver'].is_sql:
                self.domain_config_api.release_registration(domain_id)
                return

            # To ensure the current domain is the only SQL driver, we attempt
            # to register our use of SQL. If we get it we know we are good,
            # if we fail to register it then we should:
            #
            # - First check if another process has registered for SQL for our
            #   domain, in which case we are fine
            # - If a different domain has it, we should check that this domain
            #   is still valid, in case, for example, domain deletion somehow
            #   failed to remove its registration (i.e. we self heal for these
            #   kinds of issues).

            domain_registered = 'Unknown'
            for attempt in range(REGISTRATION_ATTEMPTS):
                if self.domain_config_api.obtain_registration(
                        domain_id, SQL_DRIVER):
                    LOG.debug('Domain %s successfully registered to use the '
                              'SQL driver.', domain_id)
                    return

                # We failed to register our use, let's find out who is using it
                try:
                    domain_registered = (
                        self.domain_config_api.read_registration(
                            SQL_DRIVER))
                except exception.ConfigRegistrationNotFound:
                    msg = ('While attempting to register domain %(domain)s to '
                           'use the SQL driver, another process released it, '
                           'retrying (attempt %(attempt)s).')
                    LOG.debug(msg, {'domain': domain_id,
                                    'attempt': attempt + 1})
                    continue

                if domain_registered == domain_id:
                    # Another process already registered it for us, so we are
                    # fine. In the race condition when another process is
                    # in the middle of deleting this domain, we know the domain
                    # is already disabled and hence telling the caller that we
                    # are registered is benign.
                    LOG.debug('While attempting to register domain %s to use '
                              'the SQL driver, found that another process had '
                              'already registered this domain. This is normal '
                              'in multi-process configurations.', domain_id)
                    return

                # So we don't have it, but someone else does...let's check that
                # this domain is still valid
                try:
                    self.resource_api.get_domain(domain_registered)
                except exception.DomainNotFound:
                    msg = ('While attempting to register domain %(domain)s to '
                           'use the SQL driver, found that it was already '
                           'registered to a domain that no longer exists '
                           '(%(old_domain)s). Removing this stale '
                           'registration and retrying (attempt %(attempt)s).')
                    LOG.debug(msg, {'domain': domain_id,
                                    'old_domain': domain_registered,
                                    'attempt': attempt + 1})
                    self.domain_config_api.release_registration(
                        domain_registered, type=SQL_DRIVER)
                    continue

                # The domain is valid, so we really do have an attempt at more
                # than one SQL driver.
                details = (
                    _('Config API entity at /domains/%s/config') % domain_id)
                raise exception.MultipleSQLDriversInConfig(source=details)

            # We fell out of the loop without either registering our domain or
            # being able to find who has it...either we were very very very
            # unlucky or something is awry.
            msg = _('Exceeded attempts to register domain %(domain)s to use '
                    'the SQL driver, the last domain that appears to have '
                    'had it is %(last_domain)s, giving up') % {
                        'domain': domain_id, 'last_domain': domain_registered}
            raise exception.UnexpectedError(msg)

        domain_config = {}
        domain_config['cfg'] = cfg.ConfigOpts()
        keystone.conf.configure(conf=domain_config['cfg'])
        domain_config['cfg'](args=[], project='keystone',
                             default_config_files=[],
                             default_config_dirs=[])

        # Override any options that have been passed in as specified in the
        # database.
        for group in specific_config:
            for option in specific_config[group]:
                domain_config['cfg'].set_override(
                    option, specific_config[group][option],
                    group, enforce_type=True)

        # Keep the raw overrides so a later call can cheaply compare them
        # against the current database contents and detect config changes
        # (see check_config_and_reload_domain_driver_if_required()).
        domain_config['cfg_overrides'] = specific_config
        domain_config['driver'] = self._load_driver(domain_config)
        _assert_no_more_than_one_sql_driver(domain_id, domain_config)
        self[domain_id] = domain_config

    def _setup_domain_drivers_from_database(self, standard_driver,
                                            resource_api):
        """Read domain specific configuration from database and load drivers.

        Domain configurations are stored in the domain-config backend,
        so we go through each domain to find those that have a specific config
        defined, and for those that do we:

        - Create a new config structure, overriding any specific options
          defined in the resource backend
        - Initialise a new instance of the required driver with this new config

        """
        for domain in resource_api.list_domains():
            domain_config_options = (
                self.domain_config_api.
                get_config_with_sensitive_info(domain['id']))
            if domain_config_options:
                self._load_config_from_database(domain['id'],
                                                domain_config_options)

    def setup_domain_drivers(self, standard_driver, resource_api):
        """Load all domain-specific drivers from files or the database."""
        # This is called by the api call wrapper
        self.driver = standard_driver

        if CONF.identity.domain_configurations_from_database:
            self._setup_domain_drivers_from_database(standard_driver,
                                                     resource_api)
        else:
            self._setup_domain_drivers_from_files(standard_driver,
                                                  resource_api)
        self.configured = True

    def get_domain_driver(self, domain_id):
        """Return the domain's specific driver, or None if it has none."""
        self.check_config_and_reload_domain_driver_if_required(domain_id)
        if domain_id in self:
            return self[domain_id]['driver']

    def get_domain_conf(self, domain_id):
        """Return the domain-specific config, or the global CONF if none."""
        self.check_config_and_reload_domain_driver_if_required(domain_id)
        if domain_id in self:
            return self[domain_id]['cfg']
        else:
            return CONF

    def reload_domain_driver(self, domain_id):
        # Only used to support unit tests that want to set
        # new config values.  This should only be called once
        # the domains have been configured, since it relies on
        # the fact that the configuration files/database have already been
        # read.
        if self.configured:
            if domain_id in self:
                self[domain_id]['driver'] = (
                    self._load_driver(self[domain_id]))
            else:
                # The standard driver
                # NOTE(review): this calls the stored driver object itself,
                # which only works if self.driver is callable (e.g. a class
                # or factory) — confirm against the unit tests that use this.
                self.driver = self.driver()

    def check_config_and_reload_domain_driver_if_required(self, domain_id):
        """Check for, and load, any new domain specific config for this domain.

        This is only supported for the database-stored domain specific
        configuration.

        When the domain specific drivers were set up, we stored away the
        specific config for this domain that was available at that time. So we
        now read the current version and compare. While this might seem
        somewhat inefficient, the sensitive config call is cached, so should be
        light weight. More importantly, when the cache timeout is reached, we
        will get any config that has been updated from any other keystone
        process.

        This cache-timeout approach works for both multi-process and
        multi-threaded keystone configurations. In multi-threaded
        configurations, even though we might remove a driver object (that
        could be in use by another thread), this won't actually be thrown away
        until all references to it have been broken. When that other
        thread is released back and is restarted with another command to
        process, next time it accesses the driver it will pickup the new one.

        """
        if (not CONF.identity.domain_specific_drivers_enabled or
                not CONF.identity.domain_configurations_from_database):
            # If specific drivers are not enabled, then there is nothing to do.
            # If we are not storing the configurations in the database, then
            # we'll only re-read the domain specific config files on startup
            # of keystone.
            return

        latest_domain_config = (
            self.domain_config_api.
            get_config_with_sensitive_info(domain_id))
        domain_config_in_use = domain_id in self

        if latest_domain_config:
            if (not domain_config_in_use or
                    latest_domain_config != self[domain_id]['cfg_overrides']):
                self._load_config_from_database(domain_id,
                                                latest_domain_config)
        elif domain_config_in_use:
            # The domain specific config has been deleted, so should remove the
            # specific driver for this domain.
            try:
                del self[domain_id]
            except KeyError:  # nosec
                # Allow this error in case we are unlucky and in a
                # multi-threaded situation, two threads happen to be running
                # in lock step.
                pass
        # If we fall into the else condition, this means there is no domain
        # config set, and there is none in use either, so we have nothing
        # to do.

def domains_configured(f):
    """Lazily initialize domain-specific configs before a wrapped API call.

    The assignment manager needs to be initialized before this manager,
    yet this manager's own init wants to make assignment calls in order
    to build the domain configs. To break that cycle, the setup is
    deferred: on entry to each wrapped call we check whether the domain
    configs have been loaded yet and, if required, load them.

    """
    @functools.wraps(f)
    def _ensure_domains_then_call(self, *args, **kwargs):
        configs = self.domain_configs
        if (not configs.configured and
                CONF.identity.domain_specific_drivers_enabled):
            # Domain-specific drivers are enabled but not yet loaded;
            # acquire the lock and perform the one-time setup.
            with configs.lock:
                # Re-check under the lock: another thread may have won the
                # race and already completed the configuration.
                if not configs.configured:
                    configs.setup_domain_drivers(self.driver,
                                                 self.resource_api)
        return f(self, *args, **kwargs)
    return _ensure_domains_then_call


def exception_translated(exception_type):
    """Decorate an API call so PublicIDNotFound maps to a public exception.

    Depending on *exception_type* ('user', 'group' or 'assertion'), an
    internal PublicIDNotFound raised by the wrapped call is re-raised as
    the corresponding caller-facing exception; any other value re-raises
    the original error untouched.

    """
    def _exception_translated(f):
        @functools.wraps(f)
        def _translate(self, *args, **kwargs):
            try:
                return f(self, *args, **kwargs)
            except exception.PublicIDNotFound as e:
                # The branches are mutually exclusive; order is irrelevant.
                if exception_type == 'assertion':
                    raise AssertionError(_('Invalid user / password'))
                if exception_type == 'group':
                    raise exception.GroupNotFound(group_id=str(e))
                if exception_type == 'user':
                    raise exception.UserNotFound(user_id=str(e))
                raise
        return _translate
    return _exception_translated


@notifications.listener
@dependency.provider('identity_api')
@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api',
                     'resource_api', 'revoke_api', 'shadow_users_api',
                     'federation_api')
class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    This class also handles the support of domain specific backends, by using
    the DomainConfigs class. The setup call for DomainConfigs is called
    from within the @domains_configured wrapper in a lazy loading fashion
    to get around the fact that we can't satisfy the assignment api it needs
    from within our __init__() function since the assignment driver is not
    itself yet initialized.

    Each of the identity calls are pre-processed here to choose, based on
    domain, which of the drivers should be called. The non-domain-specific
    driver is still in place, and is used if there is no specific driver for
    the domain in question (or we are not using multiple domain drivers).

    Starting with Juno, in order to be able to obtain the domain from
    just an ID being presented as part of an API call, a public ID to domain
    and local ID mapping is maintained.  This mapping also allows for the local
    ID of drivers that do not provide simple UUIDs (such as LDAP) to be
    referenced via a public facing ID.  The mapping itself is automatically
    generated as entities are accessed via the driver.

    This mapping is only used when:
    - the entity is being handled by anything other than the default driver, or
    - the entity is being handled by the default LDAP driver and backward
    compatible IDs are not required.

    This means that in the standard case of a single SQL backend or the default
    settings of a single LDAP backend (since backward compatible IDs is set to
    True by default), no mapping is used. An alternative approach would be to
    always use the mapping table but, in the cases where we don't need it,
    to make the public and local IDs the same. It is felt that not using the
    mapping by default is a more prudent way to introduce this functionality.

    """

    driver_namespace = 'keystone.identity'

    _USER = 'user'
    _GROUP = 'group'

    def __init__(self):
        """Initialize with the default driver; defer domain-specific setup.

        Domain-specific drivers are not loaded here -- they are set up
        lazily by the @domains_configured wrapper, because the managers
        this one depends on may not yet be initialized (see class docs).
        """
        super(Manager, self).__init__(CONF.identity.driver)
        self.domain_configs = DomainConfigs()
        # Clean up users/groups owned by a domain when it is deleted.
        notifications.register_event_callback(
            notifications.ACTIONS.internal, notifications.DOMAIN_DELETED,
            self._domain_deleted
        )
        # NOTE(review): not referenced elsewhere in this chunk -- presumably
        # consumed by the @notifications.listener class decorator; confirm
        # before removing.
        self.event_callbacks = {}

    def _domain_deleted(self, service, resource_type, operation,
                        payload):
        """Delete users and groups belonging to a deleted domain.

        Callback for the internal DOMAIN_DELETED notification registered
        in __init__; ``payload['resource_info']`` carries the deleted
        domain's ID.  Only SQL-backed domains are cleaned up (see below).
        """
        domain_id = payload['resource_info']

        driver = self._select_identity_driver(domain_id)

        if not driver.is_sql:
            # The LDAP driver does not support deleting users or groups.
            # Moreover, we shouldn't destroy users and groups in an unknown
            # driver. The only time when we should delete users and groups is
            # when the backend is SQL because the foreign key in the SQL table
            # forces us to.
            return

        user_refs = self.list_users(domain_scope=domain_id)
        group_refs = self.list_groups(domain_scope=domain_id)

        for group in group_refs:
            # Cleanup any existing groups.
            try:
                self.delete_group(group['id'])
            except exception.GroupNotFound:
                LOG.debug(('Group %(groupid)s not found when deleting domain '
                           'contents for %(domainid)s, continuing with '
                           'cleanup.'),
                          {'groupid': group['id'], 'domainid': domain_id})

        # And finally, delete the users themselves
        for user in user_refs:
            try:
                self.delete_user(user['id'])
            except exception.UserNotFound:
                LOG.debug(('User %(userid)s not found when deleting domain '
                           'contents for %(domainid)s, continuing with '
                           'cleanup.'),
                          {'userid': user['id'], 'domainid': domain_id})

    # Domain ID normalization methods

    def _set_domain_id_and_mapping(self, ref, domain_id, driver,
                                   entity_type):
        """Patch the domain_id/public_id into the resulting entity(ies).

        :param ref: the entity or list of entities to post process
        :param domain_id: the domain scope used for the call
        :param driver: the driver used to execute the call
        :param entity_type: whether this is a user or group

        :returns: post processed entity or list of entities

        Substitutes a public facing ID via the mapping table as necessary:

        - domain-unaware drivers get a domain_id injected into every entity
          irrespective of mapping;
        - drivers without UUID support always get a mapping, except the
          default driver with backward_compatible_ids set to True (so IDs
          do not change for an existing single-domain LDAP installation);
        - UUID-capable drivers get a mapping entry that reuses the local
          UUID as the public ID, unless only the single default driver is
          in use, in which case no mapping is needed at all.
        """
        identity_conf = CONF.identity

        # The classic single-SQL-driver case: nothing to patch.
        if not self._needs_post_processing(driver):
            return ref

        LOG.debug('ID Mapping - Domain ID: %(domain)s, '
                  'Default Driver: %(driver)s, '
                  'Domains: %(aware)s, UUIDs: %(generate)s, '
                  'Compatible IDs: %(compat)s',
                  {'domain': domain_id,
                   'driver': (driver == self.driver),
                   'aware': driver.is_domain_aware(),
                   'generate': driver.generates_uuids(),
                   'compat': CONF.identity_mapping.backward_compatible_ids})

        if isinstance(ref, dict):
            return self._set_domain_id_and_mapping_for_single_ref(
                ref, domain_id, driver, entity_type, identity_conf)
        if isinstance(ref, list):
            return self._set_domain_id_and_mapping_for_list(
                ref, domain_id, driver, entity_type, identity_conf)
        raise ValueError(_('Expected dict or list: %s') % type(ref))

    def _needs_post_processing(self, driver):
        """Return whether entity from driver needs domain added or mapping."""
        return (driver is not self.driver or not driver.generates_uuids() or
                not driver.is_domain_aware())

    def _insert_new_public_id(self, local_entity, ref, driver):
        """Allocate a mapping entry and write the public ID into ``ref``."""
        # Drivers that already generate UUIDs keep their local ID as the
        # public one; otherwise the mapping backend picks a new public ID.
        public_id = ref['id'] if driver.generates_uuids() else None
        ref['id'] = self.id_mapping_api.create_id_mapping(
            local_entity, public_id)
        LOG.debug('Created new mapping to public ID: %s', ref['id'])

    def _set_domain_id_and_mapping_for_single_ref(self, ref, domain_id,
                                                  driver, entity_type, conf):
        """Post-process a single entity ref: add domain_id and map its ID."""
        LOG.debug('Local ID: %s', ref['id'])
        # Work on a copy so the ref the driver handed back is left untouched.
        entity = ref.copy()

        self._insert_domain_id_if_needed(entity, driver, domain_id, conf)

        if not self._is_mapping_needed(driver):
            return entity

        local_entity = {'domain_id': entity['domain_id'],
                        'local_id': entity['id'],
                        'entity_type': entity_type}
        public_id = self.id_mapping_api.get_public_id(local_entity)
        if public_id:
            entity['id'] = public_id
            LOG.debug('Found existing mapping to public ID: %s',
                      entity['id'])
        else:
            self._insert_new_public_id(local_entity, entity, driver)
        return entity

    def _set_domain_id_and_mapping_for_list(self, ref_list, domain_id, driver,
                                            entity_type, conf):
        """Set domain id and mapping for a list of refs.

        The method modifies refs in-place.

        :param ref_list: list of entity dicts fresh from the driver
        :param domain_id: the domain scope of the call (may be None)
        :param driver: the driver that produced the refs
        :param entity_type: whether these are users or groups
        :param conf: the CONF.identity configuration group
        :returns: ref_list with domain_id set and public IDs substituted
        """
        if not ref_list:
            return []

        for r in ref_list:
            self._insert_domain_id_if_needed(r, driver, domain_id, conf)

        if not self._is_mapping_needed(driver):
            return ref_list

        # build a map of refs for fast look-up
        refs_map = {}
        for r in ref_list:
            refs_map[(r['id'], entity_type, r['domain_id'])] = r

        # NOTE(breton): there are cases when the driver is not domain aware and
        # no domain_id was explicitely provided for list operation. domain_id
        # gets inserted into refs, but not passed into this method. Lets use
        # domain_id from one of the refs.
        if not domain_id:
            domain_id = ref_list[0]['domain_id']

        # fetch all mappings for the domain, lookup the user at the map built
        # at previous step and replace his id.
        domain_mappings = self.id_mapping_api.get_domain_mapping_list(
            domain_id)
        for _mapping in domain_mappings:
            idx = (_mapping.local_id, _mapping.entity_type, _mapping.domain_id)
            try:
                ref = refs_map.pop(idx)
                # due to python specifics, `ref` still points to an item in
                # `ref_list`. That's why when we change it here, it gets
                # changed in `ref_list`.
                ref['id'] = _mapping.public_id
            except KeyError:
                pass  # some old entry, skip it

        # at this point, all known refs were granted a public_id. For the refs
        # left, there are no mappings. They need to be created.
        for ref in refs_map.values():
            local_entity = {'domain_id': ref['domain_id'],
                            'local_id': ref['id'],
                            'entity_type': entity_type}
            self._insert_new_public_id(local_entity, ref, driver)
        return ref_list

    def _insert_domain_id_if_needed(self, ref, driver, domain_id, conf):
        """Insert the domain ID into the ref, if required.

        If the driver can't handle domains, then we need to insert the
        domain_id into the entity being returned.  If the domain_id is
        None that means we are running in a single backend mode, so to
        remain backwardly compatible, we put in the default domain ID.
        """
        if not driver.is_domain_aware():
            if domain_id is None:
                domain_id = conf.default_domain_id
            ref['domain_id'] = domain_id

    def _is_mapping_needed(self, driver):
        """Return whether mapping is needed.

        There are two situations where we must use the mapping:
        - this isn't the default driver (i.e. multiple backends), or
        - we have a single backend that doesn't use UUIDs
        The exception to the above is that we must honor backward
        compatibility if this is the default driver (e.g. to support
        current LDAP)
        """
        is_not_default_driver = driver is not self.driver
        return (is_not_default_driver or (
            not driver.generates_uuids() and
            not CONF.identity_mapping.backward_compatible_ids))

    def _clear_domain_id_if_domain_unaware(self, driver, ref):
        """Clear domain_id details if driver is not domain aware."""
        if not driver.is_domain_aware() and 'domain_id' in ref:
            ref = ref.copy()
            ref.pop('domain_id')
        return ref

    def _select_identity_driver(self, domain_id):
        """Choose a backend driver for the given domain_id.

        :param domain_id: The domain_id for which we want to find a driver.  If
                          the domain_id is specified as None, then this means
                          we need a driver that handles multiple domains.

        :returns: chosen backend driver

        If there is a specific driver defined for this domain then choose it.
        If the domain is None, or there no specific backend for the given
        domain is found, then we chose the default driver.

        """
        if domain_id is None:
            driver = self.driver
        else:
            driver = (self.domain_configs.get_domain_driver(domain_id) or
                      self.driver)

        # If the driver is not domain aware (e.g. LDAP) then check to
        # ensure we are not mapping multiple domains onto it - the only way
        # that would happen is that the default driver is LDAP and the
        # domain is anything other than None or the default domain.
        if (not driver.is_domain_aware() and driver == self.driver and
            domain_id != CONF.identity.default_domain_id and
                domain_id is not None):
                    LOG.warning(_LW('Found multiple domains being mapped to a '
                                    'driver that does not support that (e.g. '
                                    'LDAP) - Domain ID: %(domain)s, '
                                    'Default Driver: %(driver)s'),
                                {'domain': domain_id,
                                 'driver': (driver == self.driver)})
                    raise exception.DomainNotFound(domain_id=domain_id)
        return driver

    def _get_domain_driver_and_entity_id(self, public_id):
        """Look up domain, driver and local entity ID from a public ID.

        :param public_id: the ID provided in the call

        :returns: tuple of (domain_id, driver, entity_id).  domain_id may
                  be None, indicating that the driver in question supports
                  multiple domains.  entity_id is the local ID understood
                  by the returned driver.
        :raises keystone.exception.PublicIDNotFound: if a mapping is
            required but no entry exists for public_id

        Use the mapping table to look up the domain, driver and local entity
        that is represented by the provided public ID.  Handle the situations
        where we do not use the mapping (e.g. single driver that understands
        UUIDs etc.)

        """
        conf = CONF.identity
        # First, since we don't know anything about the entity yet, we must
        # assume it needs mapping, so long as we are using domain specific
        # drivers.
        if conf.domain_specific_drivers_enabled:
            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
            if local_id_ref:
                return (
                    local_id_ref['domain_id'],
                    self._select_identity_driver(local_id_ref['domain_id']),
                    local_id_ref['local_id'])

        # So either we are using multiple drivers but the public ID is invalid
        # (and hence was not found in the mapping table), or the public ID is
        # being handled by the default driver.  Either way, the only place left
        # to look is in that standard driver. However, we don't yet know if
        # this driver also needs mapping (e.g. LDAP in non backward
        # compatibility mode).
        driver = self.driver
        if driver.generates_uuids():
            # NOTE: is_domain_aware() must be *called* here.  The previous
            # code referenced the bound method (`driver.is_domain_aware`),
            # which is always truthy, so the domain-unaware branch below
            # was unreachable.
            if driver.is_domain_aware():
                # No mapping required, and the driver can handle the domain
                # information itself.  The classic case of this is the
                # current SQL driver.
                return (None, driver, public_id)
            else:
                # Although we don't have any drivers of this type, i.e. that
                # understand UUIDs but not domains, conceptually you could.
                return (conf.default_domain_id, driver, public_id)

        # So the only place left to find the ID is in the default driver which
        # we now know doesn't generate UUIDs
        if not CONF.identity_mapping.backward_compatible_ids:
            # We are not running in backward compatibility mode, so we
            # must use a mapping.
            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
            if local_id_ref:
                return (
                    local_id_ref['domain_id'],
                    driver,
                    local_id_ref['local_id'])
            else:
                raise exception.PublicIDNotFound(id=public_id)

        # If we reach here, this means that the default driver
        # requires no mapping - but also doesn't understand domains
        # (e.g. the classic single LDAP driver situation). Hence we pass
        # back the public_ID unmodified and use the default domain (to
        # keep backwards compatibility with existing installations).
        #
        # It is still possible that the public ID is just invalid in
        # which case we leave this to the caller to check.
        return (conf.default_domain_id, driver, public_id)

    def _assert_user_and_group_in_same_backend(
            self, user_entity_id, user_driver, group_entity_id, group_driver):
        """Ensure that user and group IDs are backed by the same backend.

        Raise a CrossBackendNotAllowed exception if they are not from the same
        backend, otherwise return None.

        """
        if user_driver is not group_driver:
            # Determine first if either IDs don't exist by calling
            # the driver.get methods (which will raise a NotFound
            # exception).
            user_driver.get_user(user_entity_id)
            group_driver.get_group(group_entity_id)
            # If we get here, then someone is attempting to create a cross
            # backend membership, which is not allowed.
            raise exception.CrossBackendNotAllowed(group_id=group_entity_id,
                                                   user_id=user_entity_id)

    def _mark_domain_id_filter_satisfied(self, hints):
        if hints:
            for filter in hints.filters:
                if (filter['name'] == 'domain_id' and
                        filter['comparator'] == 'equals'):
                    hints.filters.remove(filter)

    def _ensure_domain_id_in_hints(self, hints, domain_id):
        if (domain_id is not None and
                not hints.get_exact_filter_by_name('domain_id')):
            hints.add_filter('domain_id', domain_id)

    def _set_list_limit_in_hints(self, hints, driver):
        """Set list limit in hints from driver.

        If a hints list is provided, the wrapper will insert the relevant
        limit into the hints so that the underlying driver call can try and
        honor it. If the driver does truncate the response, it will update the
        'truncated' attribute in the 'limit' entry in the hints list, which
        enables the caller of this function to know if truncation has taken
        place. If, however, the driver layer is unable to perform truncation,
        the 'limit' entry is simply left in the hints list for the caller to
        handle.

        A _get_list_limit() method is required to be present in the object
        class hierarchy, which returns the limit for this backend to which
        we will truncate.

        If a hints list is not provided in the arguments of the wrapped call
        then any limits set in the config file are ignored.  This allows
        internal use of such wrapped methods where the entire data set is
        needed as input for the calculations of some other API (e.g. get role
        assignments for a given project).

        This method, specific to identity manager, is used instead of more
        general response_truncated, because the limit for identity entities
        can be overridden in domain-specific config files. The driver to use
        is determined during processing of the passed parameters and
        response_truncated is designed to set the limit before any processing.
        """
        if hints is None:
            return

        list_limit = driver._get_list_limit()
        if list_limit:
            hints.set_limit(list_limit)

    # The actual driver calls - these are pre/post processed here as
    # part of the Manager layer to make sure we:
    #
    # - select the right driver for this domain
    # - clear/set domain_ids for drivers that do not support domains
    # - create any ID mapping that might be required

    @notifications.emit_event('authenticate')
    @domains_configured
    @exception_translated('assertion')
    def authenticate(self, request, user_id, password):
        """Authenticate a user and return the post-processed user ref.

        Resolves the public user_id to a domain-specific driver and local
        ID, verifies the password with that driver, patches the domain and
        public-ID mapping into the returned ref, and records the user's
        last-active time.  Driver NotFound errors surface as an assertion
        failure via @exception_translated('assertion').
        """
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        ref = driver.authenticate(entity_id, password)
        ref = self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)
        # NOTE(review): _shadow_nonlocal_user is defined elsewhere in this
        # class -- presumably it creates a shadow record for federated or
        # otherwise nonlocal users; confirm against its definition.
        ref = self._shadow_nonlocal_user(ref)
        self.shadow_users_api.set_last_active_at(ref['id'])
        return ref

    def _assert_default_project_id_is_not_domain(self, default_project_id):
        if default_project_id:
            # make sure project is not a domain
            try:
                project_ref = self.resource_api.get_project(default_project_id)
                if project_ref['is_domain'] is True:
                    msg = _("User's default project ID cannot be a "
                            "domain ID: %s")
                    raise exception.ValidationError(
                        message=(msg % default_project_id))
            except exception.ProjectNotFound:
                # should be idempotent if project is not found so that it is
                # backward compatible
                pass

    @domains_configured
    @exception_translated('user')
    def create_user(self, user_ref, initiator=None):
        """Create a user in the driver for the user's domain.

        :param user_ref: dict describing the user; must include 'name' and
                         'domain_id'
        :param initiator: optional initiator for the audit notification
        :returns: the created user, post-processed with domain/ID mapping
        """
        user = user_ref.copy()
        if 'password' in user:
            validators.validate_password(user['password'])
        user['name'] = clean.user_name(user['name'])
        user.setdefault('enabled', True)
        user['enabled'] = clean.user_enabled(user['enabled'])
        domain_id = user['domain_id']
        # Look up the domain first; resource_api is expected to raise if it
        # does not exist (TODO confirm against the resource manager).
        self.resource_api.get_domain(domain_id)

        self._assert_default_project_id_is_not_domain(
            user_ref.get('default_project_id'))

        # For creating a user, the domain is in the object itself
        domain_id = user_ref['domain_id']
        driver = self._select_identity_driver(domain_id)
        user = self._clear_domain_id_if_domain_unaware(driver, user)
        # Generate a local ID - in the future this might become a function of
        # the underlying driver so that it could conform to rules set down by
        # that particular driver type.
        user['id'] = uuid.uuid4().hex
        ref = driver.create_user(user['id'], user)
        notifications.Audit.created(self._USER, user['id'], initiator)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @domains_configured
    @exception_translated('user')
    @MEMOIZE
    def get_user(self, user_id):
        """Return the user identified by the public ``user_id``."""
        domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(user_id))
        return self._set_domain_id_and_mapping(
            driver.get_user(local_id), domain_id, driver,
            mapping.EntityType.USER)

    def assert_user_enabled(self, user_id, user=None):
        """Assert that the user and the user's domain are enabled.

        :param user_id: public ID of the user to check
        :param user: optional pre-fetched user ref (avoids a lookup)
        :raises AssertionError: if the user or the user's domain is disabled.
        """
        user = user if user is not None else self.get_user(user_id)
        self.resource_api.assert_domain_enabled(user['domain_id'])
        # A user with no 'enabled' attribute is treated as enabled.
        enabled = user.get('enabled', True)
        if not enabled:
            raise AssertionError(_('User is disabled: %s') % user_id)

    @domains_configured
    @exception_translated('user')
    @MEMOIZE
    def get_user_by_name(self, user_name, domain_id):
        """Return the user named ``user_name`` within the given domain."""
        driver = self._select_identity_driver(domain_id)
        return self._set_domain_id_and_mapping(
            driver.get_user_by_name(user_name, domain_id), domain_id,
            driver, mapping.EntityType.USER)

    def _translate_expired_password_hints(self, hints):
        """Clean Up Expired Password Hints.

        Any `password_expires_at` filters on the `list_users` or
        `list_users_in_group` queries are modified so the call will
        return valid data.

        The filters `comparator` is changed to the operator specified in
        the call, otherwise it is assumed to be `equals`. The filters
        `value` becomes the timestamp specified. Both the operator and
        timestamp are validated, and will raise a InvalidOperatorError
        or ValidationTimeStampError exception respectively if invalid.

        """
        operators = {'lt': operator.lt, 'gt': operator.gt,
                     'eq': operator.eq, 'lte': operator.le,
                     'gte': operator.ge, 'neq': operator.ne}
        for filter_ in hints.filters:
            if 'password_expires_at' == filter_['name']:
                # password_expires_at must be in the format
                # 'lt:2016-11-06T15:32:17Z'. So we can assume the position
                # of the ':' otherwise assign the operator to equals.
                if ':' in filter_['value'][2:4]:
                    op, timestamp = filter_['value'].split(':', 1)
                else:
                    op = 'eq'
                    timestamp = filter_['value']

                try:
                    filter_['value'] = timeutils.parse_isotime(timestamp)
                except ValueError:
                    raise exception.ValidationTimeStampError

                try:
                    filter_['comparator'] = operators[op]
                except KeyError:
                    raise exception.InvalidOperatorError(op)
        return hints

    def _handle_federated_attributes_in_hints(self, driver, hints):
        federated_attributes = ['idp_id', 'protocol_id', 'unique_id']
        for filter_ in hints.filters:
            if filter_['name'] in federated_attributes:
                return self.shadow_users_api.get_federated_users(hints)
        return driver.list_users(hints)

    @domains_configured
    @exception_translated('user')
    def list_users(self, domain_scope=None, hints=None):
        """List users, optionally scoped to a single domain.

        :param domain_scope: domain ID used to pick the driver; None selects
                             the default driver
        :param hints: optional driver_hints.Hints with filters/limits; when
                      None, configured list limits are ignored
        :returns: list of user refs, post-processed with domain/ID mapping
        """
        driver = self._select_identity_driver(domain_scope)
        self._set_list_limit_in_hints(hints, driver)
        hints = hints or driver_hints.Hints()
        if driver.is_domain_aware():
            # Force the domain_scope into the hint to ensure that we only get
            # back domains for that scope.
            self._ensure_domain_id_in_hints(hints, domain_scope)
        else:
            # We are effectively satisfying any domain_id filter by the above
            # driver selection, so remove any such filter.
            self._mark_domain_id_filter_satisfied(hints)
        hints = self._translate_expired_password_hints(hints)
        ref_list = self._handle_federated_attributes_in_hints(driver, hints)
        return self._set_domain_id_and_mapping(
            ref_list, domain_scope, driver, mapping.EntityType.USER)

    def _require_matching_domain_id(self, new_ref, orig_ref):
        """Ensure the current domain ID matches the reference one, if any.

        Provided we want domain IDs to be immutable, check whether any
        domain_id specified in the ref dictionary matches the existing
        domain_id for this entity.

        :param new_ref: the dictionary of new values proposed for this entity
        :param orig_ref: the dictionary of original values proposed for this
                         entity
        :raises: :class:`keystone.exception.ValidationError`
        """
        if 'domain_id' in new_ref:
            if new_ref['domain_id'] != orig_ref['domain_id']:
                raise exception.ValidationError(_('Cannot change Domain ID'))

    @domains_configured
    @exception_translated('user')
    def update_user(self, user_id, user_ref, initiator=None):
        """Update the user, enforcing immutable ID and domain_id.

        Invalidates cached entries for the user and, when the user is
        being disabled or the password changes, triggers token
        invalidation.

        :param user_id: public ID of the user to update
        :param user_ref: dict of attributes to update
        :param initiator: optional initiator for the audit notification
        :returns: the updated user, post-processed with domain/ID mapping
        """
        old_user_ref = self.get_user(user_id)
        user = user_ref.copy()
        self._require_matching_domain_id(user, old_user_ref)
        if 'password' in user:
            validators.validate_password(user['password'])
        if 'name' in user:
            user['name'] = clean.user_name(user['name'])
        if 'enabled' in user:
            user['enabled'] = clean.user_enabled(user['enabled'])
        if 'id' in user:
            if user_id != user['id']:
                raise exception.ValidationError(_('Cannot change user ID'))
            # Since any ID in the user dict is now irrelevant, remove it so
            # that the driver layer won't be confused by the fact that this
            # is the public ID, not the local ID.
            user.pop('id')

        self._assert_default_project_id_is_not_domain(
            user_ref.get('default_project_id'))

        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        user = self._clear_domain_id_if_domain_unaware(driver, user)
        self.get_user.invalidate(self, old_user_ref['id'])
        self.get_user_by_name.invalidate(self, old_user_ref['name'],
                                         old_user_ref['domain_id'])

        ref = driver.update_user(entity_id, user)

        notifications.Audit.updated(self._USER, user_id, initiator)

        # Disabling the user, or changing the password, invalidates any
        # tokens the user holds.
        enabled_change = ((user.get('enabled') is False) and
                          user['enabled'] != old_user_ref.get('enabled'))
        if enabled_change or user.get('password') is not None:
            self.emit_invalidate_user_token_persistence(user_id)

        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @domains_configured
    @exception_translated('user')
    def delete_user(self, user_id, initiator=None):
        """Delete the user, their assignments, credentials and ID mapping.

        :param user_id: public ID of the user to delete
        :param initiator: optional initiator for the audit notification
        """
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        # Get user details to invalidate the cache.
        user_old = self.get_user(user_id)
        driver.delete_user(entity_id)
        self.assignment_api.delete_user_assignments(user_id)
        self.get_user.invalidate(self, user_id)
        self.get_user_by_name.invalidate(self, user_old['name'],
                                         user_old['domain_id'])
        self.credential_api.delete_credentials_for_user(user_id)
        self.id_mapping_api.delete_id_mapping(user_id)
        notifications.Audit.deleted(self._USER, user_id, initiator)

        # Invalidate user role assignments cache region, as it may be caching
        # role assignments where the actor is the specified user
        assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()

    @domains_configured
    @exception_translated('group')
    def create_group(self, group_ref, initiator=None):
        """Create a group in the driver for the group's domain.

        :param group_ref: dict describing the group; must include 'name'
                          and 'domain_id'
        :param initiator: optional initiator for the audit notification
        :returns: the created group, post-processed with domain/ID mapping
        """
        group = group_ref.copy()
        group.setdefault('description', '')
        domain_id = group['domain_id']
        # Look up the domain first; resource_api is expected to raise if it
        # does not exist (TODO confirm against the resource manager).
        self.resource_api.get_domain(domain_id)

        # For creating a group, the domain is in the object itself
        domain_id = group_ref['domain_id']
        driver = self._select_identity_driver(domain_id)
        group = self._clear_domain_id_if_domain_unaware(driver, group)
        # Generate a local ID - in the future this might become a function of
        # the underlying driver so that it could conform to rules set down by
        # that particular driver type.
        group['id'] = uuid.uuid4().hex
        group['name'] = clean.group_name(group['name'])
        ref = driver.create_group(group['id'], group)

        notifications.Audit.created(self._GROUP, group['id'], initiator)

        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    @MEMOIZE
    def get_group(self, group_id):
        """Return the group identified by the public ``group_id``."""
        domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(group_id))
        return self._set_domain_id_and_mapping(
            driver.get_group(local_id), domain_id, driver,
            mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def get_group_by_name(self, group_name, domain_id):
        """Look up a group by its name within a given domain."""
        backend = self._select_identity_driver(domain_id)
        found = backend.get_group_by_name(group_name, domain_id)
        return self._set_domain_id_and_mapping(
            found, domain_id, backend, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def update_group(self, group_id, group, initiator=None):
        """Update a group, invalidating its cache entry and emitting audit."""
        previous = self.get_group(group_id)
        # A group may not be moved between domains.
        self._require_matching_domain_id(group, previous)
        domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(group_id))
        group = self._clear_domain_id_if_domain_unaware(driver, group)
        if 'name' in group:
            group['name'] = clean.group_name(group['name'])
        updated = driver.update_group(local_id, group)
        # Drop the stale cached copy before anyone re-reads it.
        self.get_group.invalidate(self, group_id)
        notifications.Audit.updated(self._GROUP, group_id, initiator)
        return self._set_domain_id_and_mapping(
            updated, domain_id, driver, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def delete_group(self, group_id, initiator=None):
        """Delete a group, its assignments and, if needed, revoke member
        tokens.

        :param group_id: public ID of the group to delete.
        :param initiator: CADF initiator for the audit notification.
        """
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        roles = self.assignment_api.list_role_assignments(group_id=group_id)
        # Materialize the membership list *before* the group is deleted.
        # A lazy generator here would only query the backend when iterated
        # below - after deletion - when the membership may already be gone,
        # so the token revocation would silently be skipped.
        user_ids = [u['id'] for u in self.list_users_in_group(group_id)]
        driver.delete_group(entity_id)
        self.get_group.invalidate(self, group_id)
        self.id_mapping_api.delete_id_mapping(group_id)
        self.assignment_api.delete_group_assignments(group_id)

        notifications.Audit.deleted(self._GROUP, group_id, initiator)

        # If the group has been created and has users but has no role
        # assignment for the group then we do not need to revoke all the users
        # tokens and can just delete the group.
        if roles:
            for uid in user_ids:
                self.emit_invalidate_user_token_persistence(uid)

        # Invalidate user role assignments cache region, as it may be caching
        # role assignments expanded from the specified group to its users
        assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()

    @domains_configured
    @exception_translated('group')
    def add_user_to_group(self, user_id, group_id, initiator=None):
        """Add a user to a group, both resolved from their public IDs."""
        @exception_translated('user')
        def resolve_user(public_id):
            # Resolve the user with 'user'-flavoured exception translation
            # instead of the surrounding 'group' translation.
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        _domain_id, user_driver, user_entity_id = resolve_user(user_id)

        # Cross-backend membership is not supported.
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)

        group_driver.add_user_to_group(user_entity_id, group_entity_id)

        # Invalidate user role assignments cache region, as it may now need to
        # include role assignments from the specified group to its users
        assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
        notifications.Audit.added_to(self._GROUP, group_id, self._USER,
                                     user_id, initiator)

    @domains_configured
    @exception_translated('group')
    def remove_user_from_group(self, user_id, group_id, initiator=None):
        """Remove a user from a group and revoke the user's tokens."""
        @exception_translated('user')
        def resolve_user(public_id):
            # Resolve the user with 'user'-flavoured exception translation
            # instead of the surrounding 'group' translation.
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        _domain_id, user_driver, user_entity_id = resolve_user(user_id)

        # Cross-backend membership is not supported.
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)

        group_driver.remove_user_from_group(user_entity_id, group_entity_id)
        # Membership loss can shrink the user's effective roles, so their
        # existing tokens must be revoked.
        self.emit_invalidate_user_token_persistence(user_id)

        # Invalidate user role assignments cache region, as it may be caching
        # role assignments expanded from this group to this user
        assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
        notifications.Audit.removed_from(self._GROUP, group_id, self._USER,
                                         user_id, initiator)

    def emit_invalidate_user_token_persistence(self, user_id):
        """Emit a notification to the callback system to revoke user tokens.

        Instead of calling another manager directly, this publishes an
        internal notification; the registered callback listener carries out
        the actual token deletion and revocation.

        :param user_id: user identifier
        :type user_id: string
        """
        notifications.Audit.internal(
            notifications.INVALIDATE_USER_TOKEN_PERSISTENCE, user_id)

    def emit_invalidate_grant_token_persistence(self, user_project):
        """Emit a notification to the callback system to revoke grant tokens.

        Instead of calling another manager directly, this publishes an
        internal notification; the registered callback listener carries out
        the actual token deletion and revocation.

        :param user_project: {'user_id': user_id, 'project_id': project_id}
        :type user_project: dict
        """
        notifications.Audit.internal(
            notifications.INVALIDATE_USER_PROJECT_TOKEN_PERSISTENCE,
            user_project)

    @domains_configured
    @exception_translated('user')
    def list_groups_for_user(self, user_id, hints=None):
        """List the groups a given user is a member of."""
        domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(user_id))
        self._set_list_limit_in_hints(hints, driver)
        hints = hints or driver_hints.Hints()
        if not driver.is_domain_aware():
            # Driver selection above already scoped us to a single domain,
            # so any domain_id filter in the hints is effectively satisfied.
            self._mark_domain_id_filter_satisfied(hints)
        groups = driver.list_groups_for_user(local_id, hints)
        return self._set_domain_id_and_mapping(
            groups, domain_id, driver, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def list_groups(self, domain_scope=None, hints=None):
        """List groups, optionally restricted to a single domain."""
        driver = self._select_identity_driver(domain_scope)
        self._set_list_limit_in_hints(hints, driver)
        hints = hints or driver_hints.Hints()
        if not driver.is_domain_aware():
            # Driver selection above already scoped us to a single domain,
            # so any domain_id filter in the hints is effectively satisfied.
            self._mark_domain_id_filter_satisfied(hints)
        else:
            # Force the domain_scope into the hints so that only groups
            # within that scope are returned.
            self._ensure_domain_id_in_hints(hints, domain_scope)
        groups = driver.list_groups(hints)
        return self._set_domain_id_and_mapping(
            groups, domain_scope, driver, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def list_users_in_group(self, group_id, hints=None):
        """List the users that are members of a group."""
        domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(group_id))
        self._set_list_limit_in_hints(hints, driver)
        hints = hints or driver_hints.Hints()
        if not driver.is_domain_aware():
            # Driver selection above already scoped us to a single domain,
            # so any domain_id filter in the hints is effectively satisfied.
            self._mark_domain_id_filter_satisfied(hints)
        hints = self._translate_expired_password_hints(hints)
        members = driver.list_users_in_group(local_id, hints)
        return self._set_domain_id_and_mapping(
            members, domain_id, driver, mapping.EntityType.USER)

    @domains_configured
    @exception_translated('group')
    def check_user_in_group(self, user_id, group_id):
        """Check whether a user is a member of a group.

        Raises via the backend if the user is not in the group.
        """
        @exception_translated('user')
        def resolve_user(public_id):
            # Resolve the user with 'user'-flavoured exception translation
            # instead of the surrounding 'group' translation.
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        _domain_id, user_driver, user_entity_id = resolve_user(user_id)

        # Cross-backend membership is not supported.
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)

        return group_driver.check_user_in_group(user_entity_id,
                                                group_entity_id)

    @domains_configured
    def change_password(self, request, user_id, original_password,
                        new_password, initiator=None):
        """Change a user's password after verifying the current one.

        Audits both successful updates and rejected (invalid) passwords,
        and revokes the user's existing tokens on success.
        """
        try:
            # authenticate() raises AssertionError if authentication fails.
            self.authenticate(request, user_id, original_password)
        except exception.PasswordExpired:
            # An expired password may still be changed by its owner.
            pass

        _domain_id, driver, local_id = (
            self._get_domain_driver_and_entity_id(user_id))
        try:
            validators.validate_password(new_password)
            driver.change_password(local_id, new_password)
        except exception.PasswordValidationError as exc:
            # Record the rejection in the audit trail before re-raising.
            audit_reason = reason.Reason(str(exc), str(exc.code))
            notifications.Audit.updated(self._USER, user_id,
                                        initiator, reason=audit_reason)
            raise

        notifications.Audit.updated(self._USER, user_id, initiator)
        self.emit_invalidate_user_token_persistence(user_id)

    @MEMOIZE
    def _shadow_nonlocal_user(self, user):
        """Return the shadow record for a nonlocal user, creating it if
        absent (result is cached).
        """
        try:
            shadowed = self.shadow_users_api.get_user(user['id'])
        except exception.UserNotFound:
            shadowed = self.shadow_users_api.create_nonlocal_user(user)
        return shadowed

    @MEMOIZE
    def shadow_federated_user(self, idp_id, protocol_id, unique_id,
                              display_name):
        """Map a federated user to a user.

        :param idp_id: identity provider id
        :param protocol_id: protocol id
        :param unique_id: unique id for the user within the IdP
        :param display_name: user's display name

        :returns: dictionary of the mapped User entity
        """
        try:
            # Keep the display name of an existing shadow record fresh.
            self.shadow_users_api.update_federated_user_display_name(
                idp_id, protocol_id, unique_id, display_name)
            user_dict = self.shadow_users_api.get_federated_user(
                idp_id, protocol_id, unique_id)
        except exception.UserNotFound:
            # First sighting of this federated identity: shadow it in the
            # identity provider's domain.
            idp = self.federation_api.get_idp(idp_id)
            user_dict = self.shadow_users_api.create_federated_user(
                idp['domain_id'],
                {'idp_id': idp_id,
                 'protocol_id': protocol_id,
                 'unique_id': unique_id,
                 'display_name': display_name})
        self.shadow_users_api.set_last_active_at(user_dict['id'])
        return user_dict


@dependency.provider('id_mapping_api')
class MappingManager(manager.Manager):
    """Default pivot point for the ID Mapping backend.

    Translates between backend-local entity IDs and the public IDs
    exposed by the API, with results cached in ID_MAPPING_REGION via
    MEMOIZE_ID_MAPPING.
    """

    driver_namespace = 'keystone.identity.id_mapping'

    def __init__(self):
        super(MappingManager, self).__init__(CONF.identity_mapping.driver)

    @MEMOIZE_ID_MAPPING
    def _get_public_id(self, domain_id, local_id, entity_type):
        # Cached on the individual fields (not the dict) so the memoize
        # key is hashable and so create/delete below can target it.
        return self.driver.get_public_id({'domain_id': domain_id,
                                          'local_id': local_id,
                                          'entity_type': entity_type})

    def get_public_id(self, local_entity):
        """Return the public ID for a local entity reference dict."""
        return self._get_public_id(local_entity['domain_id'],
                                   local_entity['local_id'],
                                   local_entity['entity_type'])

    @MEMOIZE_ID_MAPPING
    def get_id_mapping(self, public_id):
        """Return the local entity dict for a public ID (cached)."""
        return self.driver.get_id_mapping(public_id)

    def create_id_mapping(self, local_entity, public_id=None):
        """Create a mapping and pre-populate both cache directions."""
        public_id = self.driver.create_id_mapping(local_entity, public_id)
        if MEMOIZE_ID_MAPPING.should_cache(public_id):
            # Warm the cache in both lookup directions so subsequent
            # reads don't hit the driver.
            self._get_public_id.set(public_id, self,
                                    local_entity['domain_id'],
                                    local_entity['local_id'],
                                    local_entity['entity_type'])
            self.get_id_mapping.set(local_entity, self, public_id)
        return public_id

    def delete_id_mapping(self, public_id):
        """Delete a mapping and evict it from both cache directions."""
        # Fetch the cached local entity first; after driver deletion we
        # could no longer resolve it to invalidate the forward mapping.
        local_entity = self.get_id_mapping.get(self, public_id)
        self.driver.delete_id_mapping(public_id)
        # Delete the key of entity from cache
        if local_entity:
            self._get_public_id.invalidate(self, local_entity['domain_id'],
                                           local_entity['local_id'],
                                           local_entity['entity_type'])
        self.get_id_mapping.invalidate(self, public_id)

    def purge_mappings(self, purge_filter):
        """Purge mappings matching a filter, dropping the whole cache."""
        # Purge mapping is rarely used and only used by the command client,
        # it's quite complex to invalidate part of the cache based on the purge
        # filters, so here invalidate the whole cache when purging mappings.
        self.driver.purge_mappings(purge_filter)
        ID_MAPPING_REGION.invalidate()


@dependency.provider('shadow_users_api')
class ShadowUsersManager(manager.Manager):
    """Default pivot point for the Shadow Users backend."""

    driver_namespace = 'keystone.identity.shadow_users'

    def __init__(self):
        # The backend is configurable via [shadow_users]/driver.
        super(ShadowUsersManager, self).__init__(CONF.shadow_users.driver)