2011-10-13 20:51:33 +02:00
|
|
|
from __future__ import absolute_import
|
|
|
|
|
2010-03-15 14:15:01 +01:00
|
|
|
import datetime
|
|
|
|
|
|
|
|
from django.conf import settings
|
2012-11-26 21:52:44 +01:00
|
|
|
from django.db import transaction, DEFAULT_DB_ALIAS, models
|
|
|
|
from django.db.utils import ConnectionHandler
|
2010-10-11 14:55:17 +02:00
|
|
|
from django.test import TestCase, TransactionTestCase, skipUnlessDBFeature
|
2010-03-15 14:15:01 +01:00
|
|
|
|
2011-10-13 20:51:33 +02:00
|
|
|
from .models import (Book, Award, AwardNote, Person, Child, Toy, PlayedWith,
|
|
|
|
PlayedWithNote, Email, Researcher, Food, Eaten, Policy, Version, Location,
|
2012-10-10 14:58:39 +02:00
|
|
|
Item, Image, File, Photo, FooFile, FooImage, FooPhoto, FooFileProxy, Login,
|
2013-02-10 18:49:28 +01:00
|
|
|
OrgUnit, OrderedPerson, House)
|
2010-03-15 14:15:01 +01:00
|
|
|
|
2010-10-11 14:55:17 +02:00
|
|
|
|
2010-03-15 14:15:01 +01:00
|
|
|
# Can't run this test under SQLite, because you can't
# get two connections to an in-memory database.
class DeleteLockingTest(TransactionTestCase):
    """
    Regression test for #9479: queryset deletes on two concurrent
    transactions must not collide and lock the database.

    Uses a second, independent DB connection (``self.conn2``) alongside the
    test's default connection, both in managed transaction mode.
    """

    def setUp(self):
        # Create a second connection to the default database
        new_connections = ConnectionHandler(settings.DATABASES)
        self.conn2 = new_connections[DEFAULT_DB_ALIAS]

        # Put both DB connections into managed transaction mode
        # NOTE: _enter_transaction_management is a private connection API;
        # the call order here (default connection first, then conn2) mirrors
        # the teardown and should not be changed.
        transaction.enter_transaction_management()
        transaction.managed(True)
        self.conn2._enter_transaction_management(True)

    def tearDown(self):
        # Close down the second connection.
        transaction.leave_transaction_management()
        self.conn2.close()

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_concurrent_delete(self):
        "Deletes on concurrent transactions don't collide and lock the database. Regression for #9479"

        # Create some dummy data
        b1 = Book(id=1, pagecount=100)
        b2 = Book(id=2, pagecount=200)
        b3 = Book(id=3, pagecount=300)
        b1.save()
        b2.save()
        b3.save()

        # Commit so the rows are visible to the second connection.
        transaction.commit()

        self.assertEqual(3, Book.objects.count())

        # Delete something using connection 2.
        cursor2 = self.conn2.cursor()
        cursor2.execute('DELETE from delete_regress_book WHERE id=1')
        self.conn2._commit()

        # Now perform a queryset delete that covers the object
        # deleted in connection 2. This causes an infinite loop
        # under MySQL InnoDB unless we keep track of already
        # deleted objects.
        Book.objects.filter(pagecount__lt=250).delete()
        transaction.commit()
        self.assertEqual(1, Book.objects.count())
        transaction.commit()
|
2010-03-15 14:15:01 +01:00
|
|
|
|
2011-01-19 22:56:14 +01:00
|
|
|
|
2010-03-15 14:15:01 +01:00
|
|
|
class DeleteCascadeTests(TestCase):
    """Cascading-delete behavior across generic relations and M2M throughs."""

    def test_generic_relation_cascade(self):
        """
        Django cascades deletes through generic-related objects to their
        reverse relations.
        """
        person = Person.objects.create(name='Nelson Mandela')
        award = Award.objects.create(name='Nobel', content_object=person)
        # The note only needs to exist; no local reference is required.
        AwardNote.objects.create(note='a peace prize', award=award)
        self.assertEqual(AwardNote.objects.count(), 1)
        person.delete()
        self.assertEqual(Award.objects.count(), 0)
        # first two asserts are just sanity checks, this is the kicker:
        self.assertEqual(AwardNote.objects.count(), 0)

    def test_fk_to_m2m_through(self):
        """
        If an M2M relationship has an explicitly-specified through model, and
        some other model has an FK to that through model, deletion is cascaded
        from one of the participants in the M2M, to the through model, to its
        related model.
        """
        juan = Child.objects.create(name='Juan')
        paints = Toy.objects.create(name='Paints')
        played = PlayedWith.objects.create(child=juan, toy=paints,
                                           date=datetime.date.today())
        # The note only needs to exist; no local reference is required.
        PlayedWithNote.objects.create(played=played,
                                      note='the next Jackson Pollock')
        self.assertEqual(PlayedWithNote.objects.count(), 1)
        paints.delete()
        self.assertEqual(PlayedWith.objects.count(), 0)
        # first two asserts just sanity checks, this is the kicker:
        self.assertEqual(PlayedWithNote.objects.count(), 0)

    def test_15776(self):
        """
        Regression test for #15776: deleting the root of a chain of related
        objects must not raise. The test passes as long as no exception
        escapes from ``policy.delete()``.
        """
        policy = Policy.objects.create(pk=1, policy_number="1234")
        version = Version.objects.create(policy=policy)
        location = Location.objects.create(version=version)
        # The Item only needs to exist; no local reference is required.
        Item.objects.create(version=version, location=location)
        policy.delete()
|
|
|
|
|
2011-01-19 22:56:14 +01:00
|
|
|
|
|
|
|
class DeleteCascadeTransactionTests(TransactionTestCase):
    """Cascade-delete scenarios that need real transaction behavior."""

    def test_inheritance(self):
        """
        Auto-created many-to-many through tables referencing a parent model are
        correctly found by the delete cascade when a child of that parent is
        deleted.

        Refs #14896.
        """
        r = Researcher.objects.create()
        email = Email.objects.create(
            label="office-email", email_address="carl@science.edu"
        )
        r.contacts.add(email)

        # Must not raise; the auto-created through rows referencing the
        # parent model are cleaned up by the cascade.
        email.delete()

    def test_to_field(self):
        """
        Cascade deletion works with ForeignKey.to_field set to non-PK.
        """
        apple = Food.objects.create(name="apple")
        # The Eaten row only needs to exist; no local reference is required.
        Eaten.objects.create(food=apple, meal="lunch")

        apple.delete()
        self.assertFalse(Food.objects.exists())
        self.assertFalse(Eaten.objects.exists())
|
|
|
|
|
2011-01-25 04:14:28 +01:00
|
|
|
|
2010-04-09 15:24:13 +02:00
|
|
|
class LargeDeleteTests(TestCase):
    """Deletion of object sets larger than the ORM's deletion chunk size."""

    def test_large_deletes(self):
        "Regression for #13309 -- if the number of objects > chunk size, deletion still occurs"
        for x in range(300):
            Book.objects.create(pagecount=x + 100)

        # Attach a signal handler so the ORM cannot take the fast-delete
        # path and must fetch/chunk the objects being deleted.
        def noop(*args, **kwargs):
            pass

        models.signals.post_delete.connect(noop, sender=Book)
        Book.objects.all().delete()
        models.signals.post_delete.disconnect(noop, sender=Book)
        self.assertEqual(Book.objects.count(), 0)
|
2012-03-05 04:41:01 +01:00
|
|
|
|
|
|
|
|
|
|
|
class ProxyDeleteTest(TestCase):
    """
    Tests on_delete behavior for proxy models.

    See #16128.
    """

    def create_image(self):
        """Return an Image referenced by both a FooImage and a FooFile."""
        # Create an Image
        test_image = Image()
        test_image.save()
        foo_image = FooImage(my_image=test_image)
        foo_image.save()

        # Get the Image instance as a File
        test_file = File.objects.get(pk=test_image.pk)
        foo_file = FooFile(my_file=test_file)
        foo_file.save()

        return test_image

    def test_delete_proxy(self):
        """
        Deleting the *proxy* instance bubbles through to its non-proxy and
        *all* referring objects are deleted.
        """
        self.create_image()

        Image.objects.all().delete()

        # An Image deletion == File deletion
        # (count() avoids fetching rows just to measure emptiness)
        self.assertEqual(Image.objects.count(), 0)
        self.assertEqual(File.objects.count(), 0)

        # The Image deletion cascaded and *all* references to it are deleted.
        self.assertEqual(FooImage.objects.count(), 0)
        self.assertEqual(FooFile.objects.count(), 0)

    def test_delete_proxy_of_proxy(self):
        """
        Deleting a proxy-of-proxy instance should bubble through to its proxy
        and non-proxy parents, deleting *all* referring objects.
        """
        test_image = self.create_image()

        # Get the Image as a Photo
        test_photo = Photo.objects.get(pk=test_image.pk)
        foo_photo = FooPhoto(my_photo=test_photo)
        foo_photo.save()

        Photo.objects.all().delete()

        # A Photo deletion == Image deletion == File deletion
        self.assertEqual(Photo.objects.count(), 0)
        self.assertEqual(Image.objects.count(), 0)
        self.assertEqual(File.objects.count(), 0)

        # The Photo deletion should have cascaded and deleted *all*
        # references to it.
        self.assertEqual(FooPhoto.objects.count(), 0)
        self.assertEqual(FooFile.objects.count(), 0)
        self.assertEqual(FooImage.objects.count(), 0)

    def test_delete_concrete_parent(self):
        """
        Deleting an instance of a concrete model should also delete objects
        referencing its proxy subclass.
        """
        self.create_image()

        File.objects.all().delete()

        # A File deletion == Image deletion
        self.assertEqual(File.objects.count(), 0)
        self.assertEqual(Image.objects.count(), 0)

        # The File deletion should have cascaded and deleted *all* references
        # to it.
        self.assertEqual(FooFile.objects.count(), 0)
        self.assertEqual(FooImage.objects.count(), 0)

    def test_delete_proxy_pair(self):
        """
        If a pair of proxy models are linked by an FK from one concrete parent
        to the other, deleting one proxy model cascade-deletes the other, and
        the deletion happens in the right order (not triggering an
        IntegrityError on databases unable to defer integrity checks).

        Refs #17918.
        """
        # Create an Image (proxy of File) and FooFileProxy (proxy of FooFile,
        # which has an FK to File)
        image = Image.objects.create()
        as_file = File.objects.get(pk=image.pk)
        FooFileProxy.objects.create(my_file=as_file)

        Image.objects.all().delete()

        self.assertEqual(FooFileProxy.objects.count(), 0)

    def test_19187_values(self):
        # .values()/.values_list() querysets cannot be deleted; refs #19187.
        with self.assertRaises(TypeError):
            Image.objects.values().delete()
        with self.assertRaises(TypeError):
            Image.objects.values_list().delete()
|
|
|
|
|
2012-10-10 14:58:39 +02:00
|
|
|
class Ticket19102Tests(TestCase):
    """
    Test different queries which alter the SELECT clause of the query. We
    also must be using a subquery for the deletion (that is, the original
    query has a join in it). The deletion should be done as "fast-path"
    deletion (that is, just one query for the .delete() call).

    Note that .values() is not tested here on purpose. .values().delete()
    doesn't work for non fast-path deletes at all.
    """
    def setUp(self):
        # Two org units, one login in each; every test deletes l1 and
        # verifies l2 survived.
        self.o1 = OrgUnit.objects.create(name='o1')
        self.o2 = OrgUnit.objects.create(name='o2')
        self.l1 = Login.objects.create(description='l1', orgunit=self.o1)
        self.l2 = Login.objects.create(description='l2', orgunit=self.o2)

    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_annotate(self):
        # annotate() alters the SELECT clause; the join on orgunit forces a
        # subquery. assertNumQueries(1) proves the fast path was taken.
        with self.assertNumQueries(1):
            Login.objects.order_by('description').filter(
                orgunit__name__isnull=False
            ).annotate(
                n=models.Count('description')
            ).filter(
                n=1, pk=self.l1.pk
            ).delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())

    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_extra(self):
        # extra(select=...) alters the SELECT clause; must still fast-delete.
        with self.assertNumQueries(1):
            Login.objects.order_by('description').filter(
                orgunit__name__isnull=False
            ).extra(
                select={'extraf':'1'}
            ).filter(
                pk=self.l1.pk
            ).delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())

    @skipUnlessDBFeature("update_can_self_select")
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_19102_distinct_on(self):
        # Both Login objs should have same description so that only the one
        # having smaller PK will be deleted.
        Login.objects.update(description='description')
        with self.assertNumQueries(1):
            Login.objects.distinct('description').order_by('pk').filter(
                orgunit__name__isnull=False
            ).delete()
        # Assumed that l1 which is created first has smaller PK.
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())

    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_select_related(self):
        # select_related() alters the SELECT clause; must still fast-delete.
        with self.assertNumQueries(1):
            Login.objects.filter(
                pk=self.l1.pk
            ).filter(
                orgunit__name__isnull=False
            ).order_by(
                'description'
            ).select_related('orgunit').delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())

    @skipUnlessDBFeature("update_can_self_select")
    def test_ticket_19102_defer(self):
        # only() defers fields, altering the SELECT clause; must still
        # fast-delete.
        with self.assertNumQueries(1):
            Login.objects.filter(
                pk=self.l1.pk
            ).filter(
                orgunit__name__isnull=False
            ).order_by(
                'description'
            ).only('id').delete()
        self.assertFalse(Login.objects.filter(pk=self.l1.pk).exists())
        self.assertTrue(Login.objects.filter(pk=self.l2.pk).exists())
|
|
|
|
|
2013-02-10 18:49:28 +01:00
|
|
|
|
|
|
|
class OrderedDeleteTests(TestCase):
    """Deletion of models whose Meta declares a default ordering."""

    def test_meta_ordered_delete(self):
        # When a subquery is performed by deletion code, the subquery must be
        # cleared of all ordering. There was a bug that caused _meta ordering
        # to be used. Refs #19720.
        h = House.objects.create(address='Foo')
        OrderedPerson.objects.create(name='Jack', lives_in=h)
        OrderedPerson.objects.create(name='Bob', lives_in=h)
        OrderedPerson.objects.filter(lives_in__address='Foo').delete()
        self.assertEqual(OrderedPerson.objects.count(), 0)
|