File ocata_live_migrations_to_pike.patch of Package openstack-cinder

diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py
index f4ec90ae3..1d830554f 100644
--- a/cinder/cmd/manage.py
+++ b/cinder/cmd/manage.py
@@ -205,7 +205,8 @@ class HostCommands(object):
 class DbCommands(object):
     """Class for managing the database."""
 
-    online_migrations = ()
+    online_migrations = (db.migrate_consistencygroups_to_groups,
+                         db.migrate_add_message_prefix)
 
     def __init__(self):
         pass
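
The tuple registered above is what cinder-manage db online_data_migrations
walks: each callable is invoked with a batch size and reports how much work
remains. A minimal sketch of that driver loop, assuming illustrative names
(run_online_migrations, ctxt, batch_size) that are not part of the patch:

# Rough approximation only; the real loop in DbCommands.online_data_migrations
# adds option parsing and per-migration reporting.
def run_online_migrations(ctxt, migrations, batch_size=50):
    for migration in migrations:
        while True:
            # Each callable returns (rows still needing migration,
            # rows migrated in this call).
            found, done = migration(ctxt, batch_size)
            if not found or not done:
                # Finished, or no progress is possible (e.g. rows skipped
                # because force=False); move on to the next migration.
                break
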
diff --git a/cinder/db/api.py b/cinder/db/api.py
index 25c406595..013ac50d7 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -1888,3 +1888,24 @@ def conditional_update(context, model, values, expected_values, filters=(),
     return IMPL.conditional_update(context, model, values, expected_values,
                                    filters, include_deleted, project_only,
                                    order)
+
+##########################
+# Ocata live migrations
+
+
+def migrate_consistencygroups_to_groups(context, max_count, force=False):
+    """Migrage CGs to generic volume groups"""
+    return IMPL.migrate_consistencygroups_to_groups(context, max_count, force)
+
+
+def migrate_add_message_prefix(context, max_count, force=False):
+    """Change Message event ids to start with the VOLUME_ prefix.
+
+    :param max_count: The maximum number of messages to consider in
+                      this run.
+    :param force: Ignored in this migration
+    :returns: number of messages needing migration, number of
+              messages migrated (both will always be less than or
+              equal to max_count).
+    """
+    return IMPL.migrate_add_message_prefix(context, max_count, force)
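
Both wrappers follow cinder's usual IMPL dispatch and hand back a
(found, done) pair, so they can also be driven directly. A hedged usage
sketch; the admin-context setup below is an assumption, not part of the patch:

from cinder import context
from cinder import db

ctxt = context.get_admin_context()

# Consider at most 50 consistency groups per call; repeat until found == 0.
found, done = db.migrate_consistencygroups_to_groups(ctxt, 50)

# Prefix up to 50 legacy message event ids with VOLUME_.
found, done = db.migrate_add_message_prefix(ctxt, 50)
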
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index d80fbfd82..7840512dd 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -61,6 +61,7 @@ from cinder.i18n import _
 from cinder.objects import fields
 from cinder import utils
+from cinder.volume import group_types
 from cinder.volume import utils as vol_utils
 
 
 CONF = cfg.CONF
@@ -6932,3 +6933,171 @@ def conditional_update(context, model, values, expected_values, filters=(),
     # Return True if we were able to change any DB entry, False otherwise
     result = query.update(values, **update_args)
     return 0 != result
+
+##########################
+# Ocata live migrations
+
+
+@require_admin_context
+def migrate_consistencygroups_to_groups(context, max_count, force=False):
+    now = timeutils.utcnow()
+    grps = model_query(context, models.Group)
+    ids = [grp.id for grp in grps] if grps else []
+    # NOTE(xyang): We are using the same IDs in the CG and Group tables.
+    # This is because we are deleting the entry from the CG table after
+    # migrating it to the Group table. Also when the user queries a CG id,
+    # we will display it whether it is in the CG table or the Group table.
+    # Without using the same IDs, we'll have to add a consistencygroup_id
+    # column in the Group table to correlate it with the CG entry so we
+    # know whether it has been migrated or not. It makes things more
+    # complicated especially because the CG entry will be removed after
+    # migration.
+    query = (model_query(context, models.ConsistencyGroup).
+             filter(models.ConsistencyGroup.id.notin_(ids)))
+    cgs = query.limit(max_count)
+
+    # Check if default group_type for migrating cgsnapshots exists
+    result = (model_query(context, models.GroupTypes,
+                          project_only=True).
+              filter_by(name=group_types.DEFAULT_CGSNAPSHOT_TYPE).
+              first())
+    if not result:
+        msg = (_('Group type %s not found. Rerun migration script to create '
+                 'the default cgsnapshot type.') %
+               group_types.DEFAULT_CGSNAPSHOT_TYPE)
+        raise exception.NotFound(msg)
+    grp_type_id = result['id']
+
+    count_all = 0
+    count_hit = 0
+    for cg in cgs.all():
+        cg_ids = []
+        cgsnapshot_ids = []
+        volume_ids = []
+        snapshot_ids = []
+        session = get_session()
+        with session.begin():
+            count_all += 1
+            cgsnapshot_list = []
+            vol_list = []
+
+            # NOTE(dulek): We should avoid modifying consistency groups that
+            # are in the middle of some operation.
+            if not force:
+                if cg.status not in (fields.ConsistencyGroupStatus.AVAILABLE,
+                                     fields.ConsistencyGroupStatus.ERROR,
+                                     fields.ConsistencyGroupStatus.DELETING):
+                    continue
+
+            # Migrate CG to group
+            grp = model_query(context, models.Group,
+                              session=session).filter_by(id=cg.id).first()
+            if grp:
+                # NOTE(xyang): This CG is already migrated to group.
+                continue
+
+            values = {'id': cg.id,
+                      'created_at': now,
+                      'updated_at': now,
+                      'deleted': False,
+                      'user_id': cg.user_id,
+                      'project_id': cg.project_id,
+                      'host': cg.host,
+                      'cluster_name': cg.cluster_name,
+                      'availability_zone': cg.availability_zone,
+                      'name': cg.name,
+                      'description': cg.description,
+                      'group_type_id': grp_type_id,
+                      'status': cg.status,
+                      'group_snapshot_id': cg.cgsnapshot_id,
+                      'source_group_id': cg.source_cgid,
+                      }
+
+            mappings = []
+            for item in cg.volume_type_id.rstrip(',').split(','):
+                mapping = models.GroupVolumeTypeMapping()
+                mapping['volume_type_id'] = item
+                mapping['group_id'] = cg.id
+                mappings.append(mapping)
+
+            values['volume_types'] = mappings
+
+            grp = models.Group()
+            grp.update(values)
+            session.add(grp)
+            cg_ids.append(cg.id)
+
+            # Update group_id in volumes
+            vol_list = (model_query(context, models.Volume,
+                                    session=session).
+                        filter_by(consistencygroup_id=cg.id).all())
+            for vol in vol_list:
+                vol.group_id = cg.id
+                volume_ids.append(vol.id)
+
+            # Migrate data from cgsnapshots to group_snapshots
+            cgsnapshot_list = (model_query(context, models.Cgsnapshot,
+                                           session=session).
+                               filter_by(consistencygroup_id=cg.id).all())
+
+            for cgsnap in cgsnapshot_list:
+                grp_snap = (model_query(context, models.GroupSnapshot,
+                                        session=session).
+                            filter_by(id=cgsnap.id).first())
+                if grp_snap:
+                    # NOTE(xyang): This CGSnapshot is already migrated to
+                    # group snapshot.
+                    continue
+
+                grp_snap = models.GroupSnapshot()
+                values = {'id': cgsnap.id,
+                          'created_at': now,
+                          'updated_at': now,
+                          'deleted': False,
+                          'user_id': cgsnap.user_id,
+                          'project_id': cgsnap.project_id,
+                          'group_id': cg.id,
+                          'name': cgsnap.name,
+                          'description': cgsnap.description,
+                          'group_type_id': grp_type_id,
+                          'status': cgsnap.status, }
+                grp_snap.update(values)
+                session.add(grp_snap)
+                cgsnapshot_ids.append(cgsnap.id)
+
+                # Update group_snapshot_id in snapshots
+                snap_list = (model_query(context, models.Snapshot,
+                                         session=session).
+                             filter_by(cgsnapshot_id=cgsnap.id).all())
+                for snap in snap_list:
+                    snap.group_snapshot_id = cgsnap.id
+                    snapshot_ids.append(snap.id)
+
+            # Delete entries in CG and CGSnapshot tables
+            cg_cgsnapshot_destroy_all_by_ids(context, cg_ids, cgsnapshot_ids,
+                                             volume_ids, snapshot_ids,
+                                             session=session)
+
+            count_hit += 1
+
+    return count_all, count_hit
+
+
+@require_admin_context
+def migrate_add_message_prefix(context, max_count, force=False):
+    prefix = "VOLUME_"
+    session = get_session()
+    with session.begin():
+        messages = (model_query(context, models.Message.id, session=session).
+                    filter(~models.Message.event_id.like(prefix + '%')).
+                    limit(max_count))
+        ids = [msg[0] for msg in messages.all()]
+        count_all = messages.count()
+        count_hit = 0
+        if ids:
+            count_hit = (model_query(context, models.Message, session=session).
+                         filter(models.Message.id.in_(ids)).
+                         update({'event_id': prefix + models.Message.event_id},
+                                synchronize_session=False))
+
+    return count_all, count_hit
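
The second migration above prepends VOLUME_ to every event_id that does not
already carry it, selecting at most max_count ids and rewriting them in one
server-side UPDATE. A self-contained sketch of that pattern; the Message
model, SQLite engine and sample event ids below are illustrative, not cinder
code:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Message(Base):
    __tablename__ = 'messages'
    id = Column(Integer, primary_key=True)
    event_id = Column(String(255))


def add_prefix(session, prefix='VOLUME_', max_count=50):
    # Select the ids first (capped at max_count), then update by id so the
    # LIMIT never has to appear in the UPDATE statement itself.
    ids = [row[0] for row in
           session.query(Message.id)
                  .filter(~Message.event_id.like(prefix + '%'))
                  .limit(max_count)]
    done = 0
    if ids:
        # prefix + Message.event_id compiles to server-side concatenation.
        done = (session.query(Message)
                       .filter(Message.id.in_(ids))
                       .update({Message.event_id: prefix + Message.event_id},
                               synchronize_session=False))
    return len(ids), done


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with sessionmaker(bind=engine)() as session:
    session.add_all([Message(event_id='LEGACY_EVENT'),
                     Message(event_id='VOLUME_ALREADY_DONE')])
    session.commit()
    print(add_prefix(session))  # (1, 1): only the unprefixed row is touched
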
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/096_add_replication_status_to_groups_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/096_add_replication_status_to_groups_table.py
new file mode 100644
index 000000000..d639a0cd0
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/096_add_replication_status_to_groups_table.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2017 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData, String, Table
+
+# This migration was moved back from 102 to 96 (replacing a placeholder) for
+# the SUSE Cloud Newton -> Pike skipped upgrade: the column must exist before
+# the live migrations above can run. This change is not carried upstream.
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Add replication_status column to groups table
+    table = Table('groups', meta, autoload=True)
+    if not hasattr(table.c, 'replication_status'):
+        new_column = Column('replication_status', String(255), nullable=True)
+        table.create_column(new_column)
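
The hasattr guard above makes the migration a no-op when groups already has
the column (for instance on a database that already ran the upstream revision
adding it). A short verification sketch using SQLAlchemy inspection; the
connection URL is an assumption:

from sqlalchemy import create_engine, inspect

engine = create_engine('mysql+pymysql://cinder:secret@localhost/cinder')
columns = {col['name'] for col in inspect(engine).get_columns('groups')}
assert 'replication_status' in columns
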
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/096_placeholder.py b/cinder/db/sqlalchemy/migrate_repo/versions/096_placeholder.py
deleted file mode 100644
index 7f0c9af0d..000000000
--- a/cinder/db/sqlalchemy/migrate_repo/versions/096_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-# This is a placeholder for Mitaka backports.
-# Do not use this number for new Newton work.  New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
-    pass