// (stray extraction artifact retained as a comment: "this repo has no description")
1/* -*- mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- */
2/*
3 * Main authors:
4 * Christopher Mears <chris.mears@monash.edu>
5 *
6 * Copyright:
7 * Christopher Mears, 2012
8 *
9 * This file is part of Gecode, the generic constraint
10 * development environment:
11 * http://www.gecode.org
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining
14 * a copy of this software and associated documentation files (the
15 * "Software"), to deal in the Software without restriction, including
16 * without limitation the rights to use, copy, modify, merge, publish,
17 * distribute, sublicense, and/or sell copies of the Software, and to
18 * permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be
22 * included in all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
28 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
29 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
30 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
31 *
32 */
33
34namespace Gecode { namespace Int { namespace LDSB {
35
36 /// Convert a \a DynamicStack<T,A> into an \a ArgArray<T>
37 template <class T, class A>
38 ArgArray<T>
39 dynamicStackToArgArray(const Support::DynamicStack<T,A>& s) {
40 ArgArray<T> a(s.entries());
41 for (int i = 0 ; i < s.entries() ; ++i) {
42 a[i] = s[i];
43 }
44 return a;
45 }
46
  /// Out-of-line virtual destructor of the symmetry base class;
  /// holds no resources, so there is nothing to release.
  template<class View>
  SymmetryImp<View>::~SymmetryImp(void) {}
49
  /// Placement allocation: symmetry objects are allocated from the
  /// space's heap (\a home.ralloc), so their memory is reclaimed
  /// together with the space.
  template<class View>
  void*
  SymmetryImp<View>::operator new(size_t s, Space& home) {
    return home.ralloc(s);
  }
55
  /// Matching placement delete: memory belongs to the space, so this
  /// is a no-op (invoked only if a constructor throws).
  template<class View>
  void
  SymmetryImp<View>::operator delete(void*,Space&) {}
59
  /// Ordinary delete is a no-op: space-allocated objects are released
  /// via \a dispose and the space's own memory management instead.
  template<class View>
  void
  SymmetryImp<View>::operator delete(void*) {}
63
64 template <class View>
65 VariableSymmetryImp<View>
66 ::VariableSymmetryImp(Space& home, int* _indices, unsigned int n)
67 : indices(home, 0, 0) {
68 // Find minimum and maximum value in _indices: the minimum is the
69 // offset, and the maximum dictates how large the bitset needs to
70 // be.
71 int maximum = _indices[0];
72 int minimum = _indices[0];
73 for (unsigned int i = 1 ; i < n ; i++) {
74 if (_indices[i] > maximum) maximum = _indices[i];
75 if (_indices[i] < minimum) minimum = _indices[i];
76 }
77 indices.resize(home, maximum-minimum+1, minimum);
78
79 // Set the bits for the included indices.
80 for (unsigned int i = 0 ; i < n ; i++) {
81 indices.set(_indices[i]);
82 }
83 }
84
85
86
  /// Copy constructor used during space cloning; copies the index
  /// bitset into the new space \a home.
  template <class View>
  inline
  VariableSymmetryImp<View>
  ::VariableSymmetryImp(Space& home, const VariableSymmetryImp& other) :
    indices(home, other.indices) {}
92
  /// Release the index bitset and report this object's size so the
  /// space can account for the freed memory.
  template <class View>
  size_t
  VariableSymmetryImp<View>
  ::dispose(Space& home) {
    indices.dispose(home);
    return sizeof(*this);
  }
100
101 template <class View>
102 void
103 VariableSymmetryImp<View>
104 ::update(Literal l) {
105 if (indices.valid(l._variable)) {
106 indices.clear(l._variable);
107 }
108 }
109
  /// Clone this symmetry into space \a home (placement-allocated).
  template <class View>
  SymmetryImp<View>*
  VariableSymmetryImp<View>::copy(Space& home) const {
    return new (home) VariableSymmetryImp<View>(home, *this);
  }
115
116
117
118 // The minimum value in vs is the bitset's offset, and the maximum
119 // dictates how large the bitset needs to be.
120 template <class View>
121 ValueSymmetryImp<View>
122 ::ValueSymmetryImp(Space& home, int* vs, unsigned int n)
123 : values(home, 0, 0) {
124 // Find minimum and maximum value in vs: the minimum is the
125 // offset, and the maximum dictates how large the bitset needs to
126 // be.
127 assert(n > 0);
128 int maximum = vs[0];
129 int minimum = vs[0];
130 for (unsigned int i = 1 ; i < n ; i++) {
131 if (vs[i] > maximum) maximum = vs[i];
132 if (vs[i] < minimum) minimum = vs[i];
133 }
134 values.resize(home, maximum-minimum+1, minimum);
135
136 // Set the bits for the included values.
137 for (unsigned int i = 0 ; i < n ; i++) {
138 values.set(vs[i]);
139 }
140 }
141
  /// Copy constructor used during space cloning; copies the value
  /// bitset into the new space \a home.
  template <class View>
  ValueSymmetryImp<View>
  ::ValueSymmetryImp(Space& home, const ValueSymmetryImp<View>& other)
    : values(home, other.values) { }
146
  /// Release the value bitset and report this object's size for the
  /// space's memory accounting.
  template <class View>
  size_t
  ValueSymmetryImp<View>
  ::dispose(Space& home) {
    values.dispose(home);
    return sizeof(*this);
  }
154
155 template <class View>
156 void
157 ValueSymmetryImp<View>
158 ::update(Literal l) {
159 if (values.valid(l._value))
160 values.clear(l._value);
161 }
162
163 template <class View>
164 SymmetryImp<View>*
165 ValueSymmetryImp<View>::copy(Space& home) const {
166 return new (home) ValueSymmetryImp(home, *this);
167 }
168
169
170
  /// Return the variable index stored at \a position of sequence
  /// number \a sequence (sequences are stored flattened in \a indices).
  template <class View>
  int
  VariableSequenceSymmetryImp<View>
  ::getVal(unsigned int sequence, unsigned int position) const {
    return indices[sequence*seq_size + position];
  }
177
178 template <class View>
179 VariableSequenceSymmetryImp<View>
180 ::VariableSequenceSymmetryImp(Space& home, int* _indices, unsigned int n,
181 unsigned int seqsize)
182 : n_indices(n), seq_size(seqsize), n_seqs(n/seqsize) {
183 indices = home.alloc<unsigned int>(n_indices);
184 unsigned int max_index = _indices[0];
185 for (unsigned int i = 0 ; i < n_indices ; i++) {
186 indices[i] = _indices[i];
187 if (indices[i] > max_index)
188 max_index = indices[i];
189 }
190
191 lookup_size = max_index+1;
192 lookup = home.alloc<int>(lookup_size);
193 for (unsigned int i = 0 ; i < lookup_size ; i++)
194 lookup[i] = -1;
195 for (unsigned int i = 0 ; i < n_indices ; i++) {
196 if (lookup[indices[i]] == -1)
197 lookup[indices[i]] = i;
198 }
199 }
200
201 template <class View>
202 VariableSequenceSymmetryImp<View>
203 ::VariableSequenceSymmetryImp(Space& home,
204 const VariableSequenceSymmetryImp& s)
205 : n_indices(s.n_indices), seq_size(s.seq_size), n_seqs(s.n_seqs),
206 lookup_size(s.lookup_size) {
207 indices = home.alloc<unsigned int>(n_indices);
208 memcpy(indices, s.indices, n_indices * sizeof(int));
209 lookup = home.alloc<int>(lookup_size);
210 memcpy(lookup, s.lookup, lookup_size * sizeof(int));
211 }
212
  /// Free both space-allocated arrays and report this object's size
  /// for the space's memory accounting.
  template <class View>
  size_t
  VariableSequenceSymmetryImp<View>
  ::dispose(Space& home) {
    home.free<unsigned int>(indices, n_indices);
    home.free<int>(lookup, lookup_size);
    return sizeof(*this);
  }
221
  /// Compute symmetric literals
  ///
  /// Given literal \a l (a variable/value pair), collect one literal
  /// per other sequence that is still symmetric to l's sequence: the
  /// variable at l's position in that sequence, paired with l's value.
  /// Two sequences are symmetric here iff at every position either
  /// both views are unassigned, or both are assigned the same value.
  template <class View>
  ArgArray<Literal>
  VariableSequenceSymmetryImp<View>
  ::symmetric(Literal l, const ViewArray<View>& x) const {
    Region region;
    Support::DynamicStack<Literal,Region> s(region);
    // Variables beyond the lookup table occur in no sequence at all.
    if (l._variable < (int)lookup_size) {
      int posIt = lookup[l._variable];
      if (posIt == -1) {
        // Variable occurs in no sequence: no symmetric literals.
        return dynamicStackToArgArray(s);
      }
      // Sequence containing l's variable, and its position within it.
      unsigned int seqNum = posIt / seq_size;
      unsigned int seqPos = posIt % seq_size;
      for (unsigned int seq = 0 ; seq < n_seqs ; seq++) {
        if (seq == seqNum) {
          continue;
        }
        // The corresponding variable is already assigned: skip it.
        if (x[getVal(seq, seqPos)].assigned()) {
          continue;
        }
        // Check pointwise symmetry between l's sequence and this one.
        bool active = true;
        const unsigned int *firstSeq = &indices[seqNum*seq_size];
        const unsigned int *secondSeq = &indices[seq*seq_size];
        for (unsigned int i = 0 ; i < seq_size ; i++) {
          const View& xv = x[firstSeq[i]];
          const View& yv = x[secondSeq[i]];
          if ((!xv.assigned() && !yv.assigned())
              || (xv.assigned() && yv.assigned() && xv.val() == yv.val())) {
            continue;
          } else {
            active = false;
            break;
          }
        }

        if (active) {
          // Same position in the symmetric sequence, same value.
          s.push(Literal(secondSeq[seqPos], l._value));
        }
      }
    }
    return dynamicStackToArgArray(s);
  }
265
266
  /// No per-literal bookkeeping is kept for variable sequence
  /// symmetries, so there is nothing to update here.
  template <class View>
  void
  VariableSequenceSymmetryImp<View>
  ::update(Literal l) {
    // Do nothing.
    (void) l;
  }
274
  /// Clone this symmetry into space \a home (placement-allocated).
  template <class View>
  SymmetryImp<View>*
  VariableSequenceSymmetryImp<View>
  ::copy(Space& home) const {
    return new (home) VariableSequenceSymmetryImp<View>(home, *this);
  }
281
282
283
  /// Return the value stored at \a position of sequence number
  /// \a sequence (sequences are stored flattened in \a values).
  template <class View>
  int
  ValueSequenceSymmetryImp<View>
  ::getVal(unsigned int sequence, unsigned int position) const {
    return values[sequence*seq_size + position];
  }
290
291 template <class View>
292 ValueSequenceSymmetryImp<View>
293 ::ValueSequenceSymmetryImp(Space& home, int* _values, unsigned int n,
294 unsigned int seqsize)
295 : n_values(n), seq_size(seqsize), n_seqs(n/seqsize),
296 dead_sequences(home, n_seqs) {
297 values = home.alloc<int>(n_values);
298 for (unsigned int i = 0 ; i < n_values ; i++)
299 values[i] = _values[i];
300 }
301
  /// Copy constructor used during space cloning; deep-copies the
  /// value array and the dead-sequence bitset into space \a home.
  template <class View>
  ValueSequenceSymmetryImp<View>
  ::ValueSequenceSymmetryImp(Space& home,
                             const ValueSequenceSymmetryImp<View>& vss)
    : n_values(vss.n_values),
      seq_size(vss.seq_size),
      n_seqs(vss.n_seqs),
      dead_sequences(home, vss.dead_sequences) {
    values = home.alloc<int>(n_values);
    for (unsigned int i = 0 ; i < n_values ; i++)
      values[i] = vss.values[i];
  }
314
  /// Free the value array and report this object's size for the
  /// space's memory accounting.
  // NOTE(review): dead_sequences is not explicitly disposed here;
  // presumably it is reclaimed together with the space — confirm.
  template <class View>
  size_t
  ValueSequenceSymmetryImp<View>
  ::dispose(Space& home) {
    home.free(values, n_values);
    return sizeof(*this);
  }
322
323 template <class View>
324 void
325 ValueSequenceSymmetryImp<View>
326 ::update(Literal l) {
327 unsigned int seq = 0;
328 unsigned int pos = 0;
329 for (unsigned int i = 0 ; i < n_values ; i++) {
330 if (values[i] == l._value) {
331 dead_sequences.set(seq);
332 // TODO: This can be slightly optimised.
333 while (pos < seq_size) {
334 i++;
335 pos++;
336 }
337 }
338 pos++;
339 if (pos == seq_size) {
340 pos = 0;
341 seq++;
342 }
343 }
344 }
345
  /// Clone this symmetry into space \a home (placement-allocated).
  template <class View>
  SymmetryImp<View>*
  ValueSequenceSymmetryImp<View>
  ::copy(Space& home) const {
    return new (home) ValueSequenceSymmetryImp<View>(home, *this);
  }
352
353}}}
354
355// STATISTICS: int-branch