Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Download

GAP 4.8.9 installation with standard packages -- copy to your CoCalc project to get it

564010 views
1
/*
2
* Normaliz
3
* Copyright (C) 2007-2014 Winfried Bruns, Bogdan Ichim, Christof Soeger
4
* This program is free software: you can redistribute it and/or modify
5
* it under the terms of the GNU General Public License as published by
6
* the Free Software Foundation, either version 3 of the License, or
7
* (at your option) any later version.
8
*
9
* This program is distributed in the hope that it will be useful,
10
* but WITHOUT ANY WARRANTY; without even the implied warranty of
11
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
* GNU General Public License for more details.
13
*
14
* You should have received a copy of the GNU General Public License
15
* along with this program. If not, see <http://www.gnu.org/licenses/>.
16
*
17
* As an exception, when this program is distributed through (i) the App Store
18
* by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play
19
* by Google Inc., then that store may impose any digital rights management,
20
* device limits and/or redistribution restrictions that are required by its
21
* terms of service.
22
*/
23
24
//---------------------------------------------------------------------------
25
26
#include <stdlib.h>
27
#include <set>
28
#include <map>
29
#include <iostream>
30
#include <string>
31
#include <algorithm>
32
#include <time.h>
33
#include <deque>
34
35
#include "libnormaliz/full_cone.h"
36
#include "libnormaliz/cone_helper.h"
37
#include "libnormaliz/vector_operations.h"
38
#include "libnormaliz/list_operations.h"
39
#include "libnormaliz/map_operations.h"
40
#include "libnormaliz/my_omp.h"
41
#include "libnormaliz/integer.h"
42
// #include "libnormaliz/sublattice_representation.h"
43
#include "libnormaliz/offload_handler.h"
44
45
//---------------------------------------------------------------------------
46
47
// Tuning constants controlling when the algorithm switches strategy.

// const size_t RecBoundTriang=1000000; // if number(supphyps)*size(triang) > RecBoundTriang
                                        // we pass to (non-recursive) pyramids
                                        // now in build_cone

const size_t EvalBoundTriang=2500000; // if more than EvalBoundTriang simplices have been stored
                                      // evaluation is started (whenever possible)

const size_t EvalBoundPyr=200000;     // the same for stored pyramids of level > 0

const size_t EvalBoundLevel0Pyr=200000; // 1000000; // the same for stored level 0 pyramids

// const size_t EvalBoundRecPyr=200000; // the same for stored RECURSIVE pyramids

// const size_t IntermedRedBoundHB=2000000; // bound for number of HB elements before
                                            // intermediate reduction is called

const int largePyramidFactor=20; // pyramid is large if largePyramidFactor*Comparisons[Pyramid_key.size()-dim] > old_nr_supp_hyps

const int SuppHypRecursionFactor=200; // pyramids for supphyps formed if Pos*Neg > ...

const size_t RAM_Size=1000000000; // we assume that there is at least 1 GB of RAM

const long GMP_time_factor=10; // factor by which GMP arithmetic differs from long long
//---------------------------------------------------------------------------
72
73
namespace libnormaliz {
74
using namespace std;
75
76
//---------------------------------------------------------------------------
77
//private
78
//---------------------------------------------------------------------------
79
80
template<typename Integer>
81
void Full_Cone<Integer>::check_simpliciality_hyperplane(const FACETDATA& hyp) const{
82
size_t nr_gen_in_hyp=0;
83
for(size_t i=0; i<nr_gen;++i)
84
if(in_triang[i]&& hyp.GenInHyp.test(i))
85
nr_gen_in_hyp++;
86
if((hyp.simplicial && nr_gen_in_hyp!=dim-2) || (!hyp.simplicial && nr_gen_in_hyp==dim-2)){
87
// NOTE: in_triang set at END of main loop in build_cone
88
errorOutput() << "Simplicial " << hyp.simplicial << " dim " << dim << " gen_in_hyp " << nr_gen_in_hyp << endl;
89
assert(false);
90
}
91
}
92
93
template<typename Integer>
94
void Full_Cone<Integer>::set_simplicial(FACETDATA& hyp){
95
size_t nr_gen_in_hyp=0;
96
for(size_t i=0; i<nr_gen;++i)
97
if(in_triang[i]&& hyp.GenInHyp.test(i))
98
nr_gen_in_hyp++;
99
hyp.simplicial=(nr_gen_in_hyp==dim-2);
100
}
101
102
template<typename Integer>
103
void Full_Cone<Integer>::number_hyperplane(FACETDATA& hyp, const size_t born_at, const size_t mother){
104
// add identifying number, the birth day and the number of mother
105
106
hyp.Mother=mother;
107
hyp.BornAt=born_at;
108
if(!multithreaded_pyramid){
109
hyp.Ident=HypCounter[0];
110
HypCounter[0]++;
111
return;
112
}
113
114
int tn;
115
if(omp_get_level()==omp_start_level)
116
tn=0;
117
else
118
tn = omp_get_ancestor_thread_num(omp_start_level+1);
119
hyp.Ident=HypCounter[tn];
120
HypCounter[tn]+=omp_get_max_threads();
121
122
}
123
124
//---------------------------------------------------------------------------
125
126
template<typename Integer>
127
bool Full_Cone<Integer>::is_hyperplane_included(FACETDATA& hyp) {
128
if (!is_pyramid) { // in the topcone we always have ov_sp > 0
129
return true;
130
}
131
//check if it would be an excluded hyperplane
132
Integer ov_sp = v_scalar_product(hyp.Hyp,Order_Vector);
133
if (ov_sp > 0) {
134
return true;
135
} else if (ov_sp == 0) {
136
for (size_t i=0; i<dim; i++) {
137
if (hyp.Hyp[i]>0) {
138
return true;
139
} else if (hyp.Hyp[i]<0) {
140
return false;
141
}
142
}
143
}
144
return false;
145
}
146
147
//---------------------------------------------------------------------------
148
149
template<typename Integer>
void Full_Cone<Integer>::add_hyperplane(const size_t& new_generator, const FACETDATA & positive,const FACETDATA & negative,
                            list<FACETDATA>& NewHyps, bool known_to_be_simplicial){
// adds a new hyperplane found in find_new_facets to this cone (restricted to generators processed)
//
// The new hyperplane is the Fourier-Motzkin combination of the pair (positive,negative):
//     Hyp[k] = positive.ValNewGen * negative.Hyp[k] - negative.ValNewGen * positive.Hyp[k]
// If that combination overflows the range of Integer, it is redone in GMP arithmetic.
// new_generator is the index of the generator currently being inserted; the result is
// appended to NewHyps (a per-thread list that is spliced into Facets by the caller).

    size_t k;

    FACETDATA NewFacet; NewFacet.Hyp.resize(dim); NewFacet.GenInHyp.resize(nr_gen);
    NewFacet.is_positive_on_all_original_gens=false;
    NewFacet.is_negative_on_some_original_gen=false;

    // try the combination in the native Integer type first
    for (k = 0; k <dim; k++) {
        NewFacet.Hyp[k]=positive.ValNewGen*negative.Hyp[k]-negative.ValNewGen*positive.Hyp[k];
        if(!check_range(NewFacet.Hyp[k]))
            break; // overflow -> fall back to GMP below
    }

    if(k==dim)
        v_make_prime(NewFacet.Hyp);
    else{
        // overflow detected: recompute the whole combination with mpz_class
        #pragma omp atomic
        GMP_hyp++;
        vector<mpz_class> mpz_neg(dim), mpz_pos(dim), mpz_sum(dim);
        convert(mpz_neg, negative.Hyp);
        convert(mpz_pos, positive.Hyp);
        for (k = 0; k <dim; k++)
            mpz_sum[k]=convertTo<mpz_class>(positive.ValNewGen)*mpz_neg[k]-convertTo<mpz_class>(negative.ValNewGen)*mpz_pos[k];
        v_make_prime(mpz_sum); // make coordinates coprime before converting back
        convert(NewFacet.Hyp, mpz_sum);
    }

    NewFacet.ValNewGen=0; // the new generator lies in the new hyperplane by construction
    NewFacet.GenInHyp=positive.GenInHyp & negative.GenInHyp; // new hyperplane contains old gen iff both pos and neg do
    if(known_to_be_simplicial){
        NewFacet.simplicial=true;
        check_simpliciality_hyperplane(NewFacet);
    }
    else
        set_simplicial(NewFacet);
    NewFacet.GenInHyp.set(new_generator); // new hyperplane contains new generator
    number_hyperplane(NewFacet,nrGensInCone,positive.Ident);

    NewHyps.push_back(NewFacet);
}
//---------------------------------------------------------------------------
196
197
198
template<typename Integer>
void Full_Cone<Integer>::find_new_facets(const size_t& new_generator){
// our Fourier-Motzkin implementation
// the special treatment of simplicial facets was inserted because of line shellings.
// At present these are not computed.
//
// Overall plan: the existing facets are partitioned by the sign of their value on the
// new generator (positive / negative / neutral, each split into simplicial and
// non-simplicial). Every pair of a positive and a negative facet that meets in a
// subfacet (a face of dimension dim-2) gives rise to one new facet via add_hyperplane.
// Subfacets of negative simplicial facets are enumerated explicitly and matched via a
// map; non-simplicial pairs are tested by a counting criterion plus a rank or
// comparison test.

    //to see if possible to replace the function .end with constant iterator since push-back is performed.

    // for dimension 0 and 1 F-M is never necessary and can lead to problems
    // when using dim-2
    if (dim <= 1)
        return;

    // NEW: new_generator is the index of the generator being inserted

    size_t i,k,nr_zero_i;
    size_t subfacet_dim=dim-2; // NEW dimension of subfacet
    size_t facet_dim=dim-1; // NEW dimension of facet

    const bool tv_verbose = false; //verbose && !is_pyramid; // && Support_Hyperplanes.nr_of_rows()>10000; //verbose in this method call

    // preparing the computations, the various types of facets are sorted into the deques
    deque <FACETDATA*> Pos_Simp,Pos_Non_Simp;
    deque <FACETDATA*> Neg_Simp,Neg_Non_Simp;
    deque <FACETDATA*> Neutral_Simp, Neutral_Non_Simp;

    boost::dynamic_bitset<> Zero_Positive(nr_gen),Zero_Negative(nr_gen); // here we collect the vertices that lie in a
                                                                        // positive resp. negative hyperplane

    bool simplex;

    if (tv_verbose) verboseOutput()<<"transform_values:"<<flush;

    typename list<FACETDATA>::iterator ii = Facets.begin();

    // Phase 1: classify all existing facets by their value on the new generator.
    for (; ii != Facets.end(); ++ii) {
        // simplex=true;
        // nr_zero_i=0;
        simplex=ii->simplicial; // at present simplicial, will become nonsimplicial if neutral
        /* for (size_t j=0; j<nr_gen; j++){
            if (ii->GenInHyp.test(j)) {
                if (++nr_zero_i > facet_dim) {
                    simplex=false;
                    break;
                }
            }
        }*/

        if (ii->ValNewGen==0) {
            ii->GenInHyp.set(new_generator); // Must be set explicitly !!
            ii->simplicial=false; // simpliciality definitely gone with the new generator
            if (simplex) {
                Neutral_Simp.push_back(&(*ii)); // simplicial without the new generator
            } else {
                Neutral_Non_Simp.push_back(&(*ii)); // nonsimplicial already without the new generator
            }
        }
        else if (ii->ValNewGen>0) {
            Zero_Positive |= ii->GenInHyp;
            if (simplex) {
                Pos_Simp.push_back(&(*ii));
            } else {
                Pos_Non_Simp.push_back(&(*ii));
            }
        }
        else if (ii->ValNewGen<0) {
            Zero_Negative |= ii->GenInHyp;
            if (simplex) {
                Neg_Simp.push_back(&(*ii));
            } else {
                Neg_Non_Simp.push_back(&(*ii));
            }
        }
    }

    // TO DO: refine the negative list using Zero_Positive, i.e. discard those facets
    // that do not contain enough positive generators.
    // Possibly even add a rank test.
    // The latter could also be done for the positive facets before they are processed.

    boost::dynamic_bitset<> Zero_PN(nr_gen);
    Zero_PN = Zero_Positive & Zero_Negative; // generators lying in at least one pos and one neg facet

    size_t nr_PosSimp = Pos_Simp.size();
    size_t nr_PosNonSimp = Pos_Non_Simp.size();
    size_t nr_NegSimp = Neg_Simp.size();
    size_t nr_NegNonSimp = Neg_Non_Simp.size();
    size_t nr_NeuSimp = Neutral_Simp.size();
    size_t nr_NeuNonSimp = Neutral_Non_Simp.size();

    if (tv_verbose) verboseOutput()<<" PS "<<nr_PosSimp<<", P "<<nr_PosNonSimp<<", NS "<<nr_NegSimp<<", N "<<nr_NegNonSimp<<", ZS "<<nr_NeuSimp<<", Z "<<nr_NeuNonSimp<<endl;

    if (tv_verbose) verboseOutput()<<"transform_values: subfacet of NS: "<<flush;

    // Phase 2: enumerate all subfacets of the negative simplicial facets
    // (per thread, merged afterwards).
    vector< list<pair < boost::dynamic_bitset<>, int> > > Neg_Subfacet_Multi(omp_get_max_threads()) ;

    boost::dynamic_bitset<> zero_i, subfacet;

    // This parallel region cannot throw a NormalizException
    #pragma omp parallel for private(zero_i,subfacet,k,nr_zero_i)
    for (i=0; i<nr_NegSimp;i++){
        zero_i=Zero_PN & Neg_Simp[i]->GenInHyp;

        nr_zero_i=0;
        for(size_t j=0;j<nr_gen;j++){
            if(zero_i.test(j))
                nr_zero_i++;
            if(nr_zero_i>subfacet_dim){
                break;
            }
        }

        if(nr_zero_i==subfacet_dim) // NEW This case treated separately
            Neg_Subfacet_Multi[omp_get_thread_num()].push_back(pair <boost::dynamic_bitset<>, int> (zero_i,i));

        if(nr_zero_i==facet_dim){
            for (k =0; k<nr_gen; k++) {
                if(zero_i.test(k)) {
                    subfacet=zero_i;
                    subfacet.reset(k); // remove k-th element from facet to obtain subfacet
                    Neg_Subfacet_Multi[omp_get_thread_num()].push_back(pair <boost::dynamic_bitset<>, int> (subfacet,i));
                }
            }
        }
    }

    list < pair < boost::dynamic_bitset<>, int> > Neg_Subfacet_Multi_United;
    for(int i=0;i<omp_get_max_threads();++i)
        Neg_Subfacet_Multi_United.splice(Neg_Subfacet_Multi_United.begin(),Neg_Subfacet_Multi[i]);
    Neg_Subfacet_Multi_United.sort(); // sorting brings duplicates next to each other

    if (tv_verbose) verboseOutput()<<Neg_Subfacet_Multi_United.size() << ", " << flush;

    list< pair < boost::dynamic_bitset<>, int > >::iterator jj;
    list< pair < boost::dynamic_bitset<>, int > >::iterator del;
    jj =Neg_Subfacet_Multi_United.begin(); // remove negative subfacets shared
    while (jj!= Neg_Subfacet_Multi_United.end()) { // by two neg simpl facets
        del=jj++;
        if (jj!=Neg_Subfacet_Multi_United.end() && (*jj).first==(*del).first) { //delete since is the intersection of two negative simplices
            Neg_Subfacet_Multi_United.erase(del);
            del=jj++;
            Neg_Subfacet_Multi_United.erase(del);
        }
    }

    size_t nr_NegSubfMult = Neg_Subfacet_Multi_United.size();
    if (tv_verbose) verboseOutput() << nr_NegSubfMult << ", " << flush;

    vector<list<FACETDATA> > NewHypsSimp(nr_PosSimp);       // per-positive-facet result lists,
    vector<list<FACETDATA> > NewHypsNonSimp(nr_PosNonSimp); // spliced into Facets at the end

    map < boost::dynamic_bitset<>, int > Neg_Subfacet;
    size_t nr_NegSubf=0;

    // size_t NrMatches=0, NrCSF=0, NrRank=0, NrComp=0, NrNewF=0;

    /* deque<bool> Indi(nr_NegNonSimp);
    for(size_t j=0;j<nr_NegNonSimp;++j)
        Indi[j]=false; */

    if(multithreaded_pyramid){
        #pragma omp atomic
        nrTotalComparisons+=nr_NegNonSimp*nr_PosNonSimp;
    }
    else{
        nrTotalComparisons+=nr_NegNonSimp*nr_PosNonSimp;
    }

//=====================================================================
// parallel from here

    bool skip_remaining = false;
#ifndef NCATCH
    std::exception_ptr tmp_exception;
#endif

    #pragma omp parallel private(jj)
    {
    size_t i,j,k,nr_zero_i;
    boost::dynamic_bitset<> subfacet(dim-2);
    jj = Neg_Subfacet_Multi_United.begin();
    size_t jjpos=0;
    int tn = omp_get_ancestor_thread_num(omp_start_level+1);

    bool found;
    // Phase 3: discard negative subfacets that are also contained in a neutral facet
    // or a non-simplicial negative facet (they cannot yield a new hyperplane here).
    // This for region cannot throw a NormalizException
    #pragma omp for schedule(dynamic)
    for (size_t j=0; j<nr_NegSubfMult; ++j) { // remove negative subfacets shared
        // re-synchronize the list iterator jj with the loop index j
        // (necessary because of the dynamic schedule)
        for(;j > jjpos; ++jjpos, ++jj) ; // by non-simpl neg or neutral facets
        for(;j < jjpos; --jjpos, --jj) ;

        subfacet=(*jj).first;
        found=false;
        for (i = 0; i <nr_NeuSimp; i++) {
            found=subfacet.is_subset_of(Neutral_Simp[i]->GenInHyp);
            if(found)
                break;
        }
        if (!found) {
            for (i = 0; i <nr_NeuNonSimp; i++) {
                found=subfacet.is_subset_of(Neutral_Non_Simp[i]->GenInHyp);
                if(found)
                    break;
            }
            if(!found) {
                for (i = 0; i <nr_NegNonSimp; i++) {
                    found=subfacet.is_subset_of(Neg_Non_Simp[i]->GenInHyp);
                    if(found)
                        break;
                }
            }
        }
        if (found) {
            jj->second=-1; // mark for removal below
        }
    }

    #pragma omp single
    { //remove elements that where found in the previous loop
    jj = Neg_Subfacet_Multi_United.begin();
    map < boost::dynamic_bitset<>, int > ::iterator last_inserted=Neg_Subfacet.begin(); // used to speedup insertion into the new map
    for (; jj!= Neg_Subfacet_Multi_United.end(); ++jj) {
        if ((*jj).second != -1) {
            last_inserted = Neg_Subfacet.insert(last_inserted,*jj);
        }
    }
    nr_NegSubf=Neg_Subfacet.size();
    }

    #pragma omp single nowait
    {Neg_Subfacet_Multi_United.clear();}


    boost::dynamic_bitset<> zero_i(nr_gen);
    map <boost::dynamic_bitset<>, int> ::iterator jj_map;


    #pragma omp single nowait
    if (tv_verbose) {
        verboseOutput() << "PS vs NS and PS vs N , " << flush;
    }

    // Phase 4: match positive simplicial facets against negative simplicial
    // subfacets (via the map) and against negative non-simplicial facets.
    vector<key_t> key(nr_gen);
    size_t nr_missing;
    bool common_subfacet;
    // we cannot use nowait here because of the way we handle exceptions in this loop
    #pragma omp for schedule(dynamic) //nowait
    for (size_t i =0; i<nr_PosSimp; i++){

        if (skip_remaining) continue;
#ifndef NCATCH
        try {
#endif

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        zero_i=Zero_PN & Pos_Simp[i]->GenInHyp;
        nr_zero_i=0;
        for(j=0;j<nr_gen && nr_zero_i<=facet_dim;j++)
            if(zero_i.test(j)){
                key[nr_zero_i]=j;
                nr_zero_i++;
            }

        if(nr_zero_i<subfacet_dim)
            continue;

        // first PS vs NS

        if (nr_zero_i==subfacet_dim) { // NEW slight change in logic. Positive simpl facet shared at most
            jj_map=Neg_Subfacet.find(zero_i); // one subfacet with negative simpl facet
            if (jj_map!=Neg_Subfacet.end()) {
                add_hyperplane(new_generator,*Pos_Simp[i],*Neg_Simp[(*jj_map).second],NewHypsSimp[i],true);
                (*jj_map).second = -1; // block subfacet in further searches
            }
        }
        if (nr_zero_i==facet_dim){ // now there could be more such subfacets. We make all and search them.
            for (k =0; k<nr_gen; k++) { // BOOST ROUTINE
                if(zero_i.test(k)) {
                    subfacet=zero_i;
                    subfacet.reset(k); // remove k-th element from facet to obtain subfacet
                    jj_map=Neg_Subfacet.find(subfacet);
                    if (jj_map!=Neg_Subfacet.end()) {
                        add_hyperplane(new_generator,*Pos_Simp[i],*Neg_Simp[(*jj_map).second],NewHypsSimp[i],true);
                        (*jj_map).second = -1;
                        // Indi[j]=true;
                    }
                }
            }
        }

        // now PS vs N

        for (j=0; j<nr_NegNonSimp; j++){ // search negative facet with common subfacet
            nr_missing=0;
            common_subfacet=true;
            for(k=0;k<nr_zero_i;k++) {
                if(!Neg_Non_Simp[j]->GenInHyp.test(key[k])) {
                    nr_missing++;
                    if(nr_missing==2 || nr_zero_i==subfacet_dim) {
                        common_subfacet=false;
                        break;
                    }
                }
            }

            if(common_subfacet){
                add_hyperplane(new_generator,*Pos_Simp[i],*Neg_Non_Simp[j],NewHypsSimp[i],true);
                if(nr_zero_i==subfacet_dim) // only one subfacet can lie in negative hyperplane
                    break;
            }
        }
#ifndef NCATCH
        } catch(const std::exception& ) {
            tmp_exception = std::current_exception();
            skip_remaining = true;
            #pragma omp flush(skip_remaining)
        }
#endif

    } // PS vs NS and PS vs N

    // Phase 5: match positive non-simplicial facets against negative simplicial
    // subfacets and negative non-simplicial facets.
    if (!skip_remaining) {
    #pragma omp single nowait
    if (tv_verbose) {
        verboseOutput() << "P vs NS and P vs N" << endl;
    }

    list<FACETDATA*> AllNonSimpHyp;
    typename list<FACETDATA*>::iterator a;

    for(i=0;i<nr_PosNonSimp;++i)
        AllNonSimpHyp.push_back(&(*Pos_Non_Simp[i]));
    for(i=0;i<nr_NegNonSimp;++i)
        AllNonSimpHyp.push_back(&(*Neg_Non_Simp[i]));
    for(i=0;i<nr_NeuNonSimp;++i)
        AllNonSimpHyp.push_back(&(*Neutral_Non_Simp[i]));
    size_t nr_NonSimp = nr_PosNonSimp+nr_NegNonSimp+nr_NeuNonSimp;

    bool ranktest;
    FACETDATA *hp_i, *hp_j, *hp_t; // pointers to current hyperplanes

    size_t missing_bound, nr_common_zero;
    boost::dynamic_bitset<> common_zero(nr_gen);
    vector<key_t> common_key;
    common_key.reserve(nr_gen);
    vector<int> key_start(nrGensInCone);

    #pragma omp for schedule(dynamic) // nowait
    for (size_t i =0; i<nr_PosNonSimp; i++){ //Positive Non Simp vs.Negative Simp and Non Simp

        if (skip_remaining) continue;

#ifndef NCATCH
        try {
#endif
        INTERRUPT_COMPUTATION_BY_EXCEPTION

        jj_map = Neg_Subfacet.begin(); // First the Simp
        for (j=0; j<nr_NegSubf; ++j,++jj_map) {
            if ( (*jj_map).second != -1 ) { // skip used subfacets
                if(jj_map->first.is_subset_of(Pos_Non_Simp[i]->GenInHyp)){
                    add_hyperplane(new_generator,*Pos_Non_Simp[i],*Neg_Simp[(*jj_map).second],NewHypsNonSimp[i],true);
                    (*jj_map).second = -1; // has now been used
                }
            }
        }

        // Now the NonSimp

        hp_i=Pos_Non_Simp[i];
        zero_i=Zero_PN & hp_i->GenInHyp; // these are the potential vertices in an intersection
        nr_zero_i=0;
        int last_existing=-1;
        for(size_t jj=0;jj<nrGensInCone;jj++) // we make a "key" of the potential vertices in the intersection
        {
            j=GensInCone[jj];
            if(zero_i.test(j)){
                key[nr_zero_i]=j;
                for(size_t kk= last_existing+1;kk<=jj;kk++) // used in the extension test
                    key_start[kk]=nr_zero_i; // to find out from which generator on both have existed
                nr_zero_i++;
                last_existing= jj;
            }
        }
        if(last_existing< (int)nrGensInCone-1)
            for(size_t kk=last_existing+1;kk<nrGensInCone;kk++)
                key_start[kk]=nr_zero_i;

        if (nr_zero_i<subfacet_dim)
            continue;

        // now nr_zero_i is the number of vertices in hp_i that have a chance to lie in a negative facet
        // and key contains the indices

        missing_bound=nr_zero_i-subfacet_dim; // at most this number of generators can be missing
                                              // to have a chance for common subfacet

        for (j=0; j<nr_NegNonSimp; j++){


            hp_j=Neg_Non_Simp[j];

            if(hp_i->Ident==hp_j->Mother || hp_j->Ident==hp_i->Mother){ // mother and daughter coming together
                add_hyperplane(new_generator,*hp_i,*hp_j,NewHypsNonSimp[i],false); // their intersection is a subfacet
                continue; // simplicial set in add_hyperplane
            }


            bool extension_test=hp_i->BornAt==hp_j->BornAt || (hp_i->BornAt<hp_j->BornAt && hp_j->Mother!=0)
                                                           || (hp_j->BornAt<hp_i->BornAt && hp_i->Mother!=0);

            // extension_test=false;

            size_t both_existing_from=key_start[max(hp_i->BornAt,hp_j->BornAt)];

            nr_missing=0;
            nr_common_zero=0;
            common_key.clear();
            size_t second_loop_bound=nr_zero_i;
            common_subfacet=true;

            // We use the following criterion:
            // if the two facets are not mother and daughter (taken care of already), then
            // they cannot have intersected in a subfacet at the time when the second was born.
            // In other words: they can only intersect in a subfacet now, if at least one common vertex
            // has been added after the birth of the younger one.
            // this is indicated by "extended".

            if(extension_test){
                bool extended=false;
                second_loop_bound=both_existing_from; // first we find the common vertices inserted from the step
                                                      // where both facets existed the first time
                for(k=both_existing_from;k<nr_zero_i;k++){
                    if(!hp_j->GenInHyp.test(key[k])) {
                        nr_missing++;
                        if(nr_missing>missing_bound) {
                            common_subfacet=false;
                            break;
                        }
                    }
                    else {
                        extended=true; // in this case they have a common vertex added after their common existence
                        common_key.push_back(key[k]);
                        nr_common_zero++;
                    }
                }

                if(!extended || !common_subfacet) //
                    continue;
            }


            for(k=0;k<second_loop_bound;k++) { // now the remaining
                if(!hp_j->GenInHyp.test(key[k])) {
                    nr_missing++;
                    if(nr_missing>missing_bound) {
                        common_subfacet=false;
                        break;
                    }
                }
                else {
                    common_key.push_back(key[k]);
                    nr_common_zero++;
                }
            }

            if(!common_subfacet)
                continue;
            /* #pragma omp atomic
            NrCSF++;*/

            // choose the cheaper of two certificates: a rank computation on the
            // common generators, or a containment comparison against all
            // non-simplicial hyperplanes
            if(using_GMP<Integer>())
                ranktest = (nr_NonSimp > GMP_time_factor*dim*dim*nr_common_zero/3); // in this case the rank computation takes longer
            else
                ranktest = (nr_NonSimp > dim*dim*nr_common_zero/3);
            // ranktest=true;

            if(ranktest) { // cout << "Rang" << endl;

            /* #pragma omp atomic
             NrRank++; */

                Matrix<Integer>& Test = Top_Cone->RankTest[tn];
                if (Test.rank_submatrix(Generators,common_key)<subfacet_dim) {
                    common_subfacet=false;
                }
            } // ranktest
            else{ // now the comparison test

            /* #pragma omp atomic
            NrComp++; */

                common_zero = zero_i & hp_j->GenInHyp;
                for (a=AllNonSimpHyp.begin();a!=AllNonSimpHyp.end();++a){
                    hp_t=*a;
                    if ((hp_t!=hp_i) && (hp_t!=hp_j) && common_zero.is_subset_of(hp_t->GenInHyp)) {
                        common_subfacet=false;
                        AllNonSimpHyp.splice(AllNonSimpHyp.begin(),AllNonSimpHyp,a); // for the "darwinistic" method
                        break;
                    }
                }
            } // else
            if (common_subfacet) { //intersection of i and j is a subfacet
                add_hyperplane(new_generator,*hp_i,*hp_j,NewHypsNonSimp[i],false); //simplicial set in add_hyperplane
                /* #pragma omp atomic
                NrNewF++; */
                // Indi[j]=true;
            }
        }
#ifndef NCATCH
        } catch(const std::exception& ) {
            tmp_exception = std::current_exception();
            skip_remaining = true;
            #pragma omp flush(skip_remaining)
        }
#endif
    } // end for
    } // end !skip_remaining
    } //END parallel

#ifndef NCATCH
    if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif
//=====================================================================
// parallel until here


    /* if(!is_pyramid)
    cout << "Matches " << NrMatches << " pot. common subf " << NrCSF << " rank test " << NrRank << " comp test "
    << NrComp << " neww hyps " << NrNewF << endl; */


    // Phase 6: merge the per-positive-facet result lists into Facets.
    for(i=0;i<nr_PosSimp;i++)
        Facets.splice(Facets.end(),NewHypsSimp[i]);

    for(i=0;i<nr_PosNonSimp;i++)
        Facets.splice(Facets.end(),NewHypsNonSimp[i]);

    //removing the negative hyperplanes
    // now done in build_cone

    if (tv_verbose) verboseOutput()<<"transform_values: done"<<endl;

}
//---------------------------------------------------------------------------
747
748
template<typename Integer>
void Full_Cone<Integer>::extend_triangulation(const size_t& new_generator){
// extends the triangulation of this cone by including new_generator
// simplicial facets save us from searching the "brother" in the existing triangulation
// to which the new simplex gets attached
//
// For every facet visible from the new generator (ValNewGen < 0) new simplices
// with apex new_generator are created: one simplex per simplicial facet, and for
// non-simplicial facets one per existing simplex that shares a facet with the
// hyperplane. Results are collected per thread and spliced into TriangulationBuffer.

    size_t listsize =old_nr_supp_hyps; // Facets.size();
    vector<typename list<FACETDATA>::iterator> visible;
    visible.reserve(listsize);
    typename list<FACETDATA>::iterator i = Facets.begin();

    // collect iterators to the visible facets; listsize becomes their count
    listsize=0;
    for (; i!=Facets.end(); ++i)
        if (i->ValNewGen < 0){ // visible facet
            visible.push_back(i);
            listsize++;
        }

#ifndef NCATCH
    std::exception_ptr tmp_exception;
#endif

    typename list< SHORTSIMPLEX<Integer> >::iterator oldTriBack = --TriangulationBuffer.end();
    #pragma omp parallel private(i)
    {
    size_t k,l;
    bool one_not_in_i, not_in_facet;
    size_t not_in_i=0;
    // size_t facet_dim=dim-1;
    // size_t nr_in_i=0;

    list< SHORTSIMPLEX<Integer> > Triangulation_kk; // thread-local simplex list
    typename list< SHORTSIMPLEX<Integer> >::iterator j;

    vector<key_t> key(dim);

    // if we only want a partial triangulation but came here because of a deep level
    // mark if this part of the triangulation has not to be evaluated
    bool skip_eval = false;

    #pragma omp for schedule(dynamic)
    for (size_t kk=0; kk<listsize; ++kk) {

#ifndef NCATCH
    try {
#endif
        INTERRUPT_COMPUTATION_BY_EXCEPTION

        i=visible[kk];

        /* nr_in_i=0;
        for(size_t m=0;m<nr_gen;m++){
            if(i->GenInHyp.test(m))
                nr_in_i++;
            if(nr_in_i>facet_dim){
                break;
            }
        }*/

        skip_eval = Top_Cone->do_partial_triangulation && i->ValNewGen == -1
                    && is_hyperplane_included(*i);

        if (i->simplicial){ // simplicial: the facet itself plus the new generator is the new simplex
            l=0;
            for (k = 0; k <nr_gen; k++) {
                if (i->GenInHyp[k]==1) {
                    key[l]=k;
                    l++;
                }
            }
            key[dim-1]=new_generator;

            if (skip_eval)
                store_key(key,0,0,Triangulation_kk);
            else
                store_key(key,-i->ValNewGen,0,Triangulation_kk);
            continue;
        } // end simplicial

        // non-simplicial: walk the triangulation sections of the generators in the
        // facet; the first dim-2 of them cannot contribute a shared facet
        size_t irrelevant_vertices=0;
        for(size_t vertex=0;vertex<nrGensInCone;++vertex){

            if(i->GenInHyp[GensInCone[vertex]]==0) // lead vertex not in hyperplane
                continue;

            if(irrelevant_vertices<dim-2){
                ++irrelevant_vertices;
                continue;
            }

            j=TriSectionFirst[vertex];
            bool done=false;
            for(;!done;j++)
            {
                done=(j==TriSectionLast[vertex]);
                key=j->key;
                one_not_in_i=false; // true indicates that one gen of simplex is not in hyperplane
                not_in_facet=false; // true indicates that a second gen of simplex is not in hyperplane
                for(k=0;k<dim;k++){
                    if ( !i->GenInHyp.test(key[k])) {
                        if(one_not_in_i){
                            not_in_facet=true;
                            break;
                        }
                        one_not_in_i=true;
                        not_in_i=k;
                    }
                }

                if(not_in_facet) // simplex does not share facet with hyperplane
                    continue;

                // exactly one generator of the simplex lies outside the hyperplane:
                // replacing it by the new generator yields the attached simplex
                key[not_in_i]=new_generator;
                if (skip_eval)
                    store_key(key,0,j->vol,Triangulation_kk);
                else
                    store_key(key,-i->ValNewGen,j->vol,Triangulation_kk);

            } // j

        } // for vertex

#ifndef NCATCH
        } catch(const std::exception& ) {
            tmp_exception = std::current_exception();
        }
#endif

    } // omp for kk

    if (multithreaded_pyramid) {
        #pragma omp critical(TRIANG)
        TriangulationBuffer.splice(TriangulationBuffer.end(),Triangulation_kk);
    } else
        TriangulationBuffer.splice(TriangulationBuffer.end(),Triangulation_kk);

    } // parallel

#ifndef NCATCH
    if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

    // record the section of the buffer holding the simplices with the new generator
    // GensInCone.push_back(new_generator); // now in extend_cone
    TriSectionFirst.push_back(++oldTriBack);
    TriSectionLast.push_back(--TriangulationBuffer.end());
}
//---------------------------------------------------------------------------
896
897
template<typename Integer>
void Full_Cone<Integer>::store_key(const vector<key_t>& key, const Integer& height,
            const Integer& mother_vol, list< SHORTSIMPLEX<Integer> >& Triangulation){
// stores a simplex given by key and height in Triangulation
// mother_vol is the volume of the simplex to which the new one is attached
// A height of 0 marks a simplex that is not to be evaluated (partial triangulation).
// If the triangulation is not kept, the list node is recycled from the free lists
// FS[tn] / FreeSimpl to avoid allocations.

    SHORTSIMPLEX<Integer> newsimplex;
    newsimplex.key=key;
    newsimplex.height=height;
    newsimplex.vol=0;

    if(multithreaded_pyramid){
        #pragma omp atomic
        TriangulationBufferSize++;
    }
    else {
        TriangulationBufferSize++;
    }
    // determine this thread's slot for the per-thread data below
    int tn;
    if(omp_get_level()==omp_start_level)
        tn=0;
    else
        tn = omp_get_ancestor_thread_num(omp_start_level+1);

    if (do_only_multiplicity) {
        // directly compute the volume
        if (mother_vol==1)
            newsimplex.vol = height;
        // the multiplicity is computed in SimplexEvaluator
        for(size_t i=0; i<dim; i++) // and needs the key in TopCone numbers
            newsimplex.key[i]=Top_Key[newsimplex.key[i]];

        if (keep_triangulation)
            sort(newsimplex.key.begin(),newsimplex.key.end());
        Top_Cone->SimplexEval[tn].evaluate(newsimplex);
        // restore the local generator numbering, needed in extend_triangulation
        newsimplex.key=key;
    }
    if (height == 0) Top_Cone->triangulation_is_partial = true;

    if (keep_triangulation){
        Triangulation.push_back(newsimplex);
        return;
    }

    bool Simpl_available=true;

    typename list< SHORTSIMPLEX<Integer> >::iterator F;

    // refill the thread-local free list from the global one if necessary
    if(Top_Cone->FS[tn].empty()){
        if (Top_Cone->FreeSimpl.empty()) {
            Simpl_available=false;
        } else {
            #pragma omp critical(FREESIMPL)
            {
            if (Top_Cone->FreeSimpl.empty()) { // re-check under the lock
                Simpl_available=false;
            } else {
                // take 1000 simplices from FreeSimpl or what you can get
                F = Top_Cone->FreeSimpl.begin();
                size_t q;
                for (q = 0; q < 1000; ++q, ++F) {
                    if (F == Top_Cone->FreeSimpl.end())
                        break;
                }

                if(q<1000)
                    Top_Cone->FS[tn].splice(Top_Cone->FS[tn].begin(),
                        Top_Cone->FreeSimpl);
                else
                    Top_Cone->FS[tn].splice(Top_Cone->FS[tn].begin(),
                                  Top_Cone->FreeSimpl,Top_Cone->FreeSimpl.begin(),F);
            } // if empty global (critical)
            } // critical
        } // if empty global
    } // if empty thread

    if (Simpl_available) {
        // reuse a recycled node: splice it over and overwrite its contents
        Triangulation.splice(Triangulation.end(),Top_Cone->FS[tn],
                        Top_Cone->FS[tn].begin());
        Triangulation.back() = newsimplex;
    } else {
        Triangulation.push_back(newsimplex);
    }
}
//---------------------------------------------------------------------------
984
985
template<typename Integer>
void Full_Cone<Integer>::process_pyramids(const size_t new_generator,const bool recursive){

/*

   We distinguish two types of pyramids:

   (i) recursive pyramids that give their support hyperplanes back to the mother.
   (ii) independent pyramids that are not linked to the mother.

   The parameter "recursive" indicates whether the pyramids that will be created
   in process_pyramid(s) are of type (i) or (ii).

   Every pyramid can create subpyramids of both types (not the case in version 2.8 - 2.10).

   Whether "this" is of type (i) or (ii) is indicated by do_all_hyperplanes.

   The creation of (sub)pyramids of type (i) can be blocked by setting recursion_allowed=false.
   (Not done in this version.)

   is_pyramid==false for the top_cone and ==true else.

   multithreaded_pyramid indicates whether parallelization takes place within the
   computation of a pyramid or whether it is computed in a single thread (defined in build_cone).

   Recursive pyramids are processed immediately after creation (as in 2.8). However, there
   are two exceptions:

   (a) In order to avoid very long waiting times for the computation of the "large" ones,
   these are treated as follows: the support hyperplanes of "this" coming from their bases
   (as negative hyperplanes of "this") are computed by matching them with the
   positive hyperplanes of "this". This Fourier-Motzkin step is much more
   efficient if a pyramid is large. For triangulation a large recursive
   pyramid is then stored as a pyramid of type (ii).

   (b) If "this" is processed in a parallelized loop calling process_pyramids, then
   the loop in process_pyramids cannot be interrupted for the evaluation of simplices. As a
   consequence an extremely long list of simplices could arise if many small subpyramids of "this"
   are created in process_pyramids. In order to prevent this dangerous effect, small recursive
   subpyramids are stored for later triangulation if the simplex buffer has reached its
   size bound.

   Pyramids of type (ii) are stored in Pyramids. The store_level of the created pyramids is 0
   for all pyramids created (possibly recursively) from the top cone. Pyramids created
   in evaluate_stored_pyramids get the store level for their subpyramids in that routine and
   transfer it to their recursive daughters. (correction March 4, 2015).

   Note: the top cone has pyr_level=-1. The pyr_level has no algorithmic relevance
   at present, but it shows the depth of the pyramid recursion at which the pyramid has been
   created.

*/


    size_t start_level=omp_get_level(); // allows us to check that we are on level 0
                                        // outside the loop and can therefore call evaluation
                                        // in order to empty the buffers
    vector<key_t> Pyramid_key;
    Pyramid_key.reserve(nr_gen);
    bool skip_triang; // make hyperplanes but skip triangulation (recursive pyramids only)

    deque<bool> done(old_nr_supp_hyps,false); // marks hyperplanes already processed
    bool skip_remaining;
#ifndef NCATCH
    std::exception_ptr tmp_exception;
#endif
    typename list< FACETDATA >::iterator hyp;
    size_t nr_done=0;

    do{  // repeats processing until all hyperplanes have been processed

    hyp=Facets.begin();
    size_t hyppos=0;
    skip_remaining = false;

    const long VERBOSE_STEPS = 50;
    long step_x_size = old_nr_supp_hyps-VERBOSE_STEPS;
    const size_t RepBound=10000;

    #pragma omp parallel for private(skip_triang) firstprivate(hyppos,hyp,Pyramid_key) schedule(dynamic) reduction(+: nr_done)
    for (size_t kk=0; kk<old_nr_supp_hyps; ++kk) {

        if (skip_remaining) continue;

        if(verbose && old_nr_supp_hyps>=RepBound){
            #pragma omp critical(VERBOSE)
            while ((long)(kk*VERBOSE_STEPS) >= step_x_size) {
                step_x_size += old_nr_supp_hyps;
                verboseOutput() << "." <<flush;
            }
        }

#ifndef NCATCH
        try {
#endif
            // move hyp to position kk (list iterators are not random access)
            for(;kk > hyppos; hyppos++, hyp++) ;
            for(;kk < hyppos; hyppos--, hyp--) ;

            INTERRUPT_COMPUTATION_BY_EXCEPTION

            if(done[hyppos])
                continue;

            done[hyppos]=true;

            nr_done++;

            if (hyp->ValNewGen == 0){ // MUST BE SET HERE
                hyp->GenInHyp.set(new_generator);
                if(recursive) hyp->simplicial=false; // in the recursive case
            }

            if (hyp->ValNewGen >= 0) // facet not visible
                continue;

            skip_triang = false;
            if (Top_Cone->do_partial_triangulation && hyp->ValNewGen>=-1) { //ht1 criterion
                skip_triang = is_hyperplane_included(*hyp);
                if (skip_triang) {
                    Top_Cone->triangulation_is_partial = true;
                    if (!recursive) {
                        continue;
                    }
                }
            }

            Pyramid_key.clear(); // make data of new pyramid
            Pyramid_key.push_back(new_generator);
            for(size_t i=0;i<nr_gen;i++){
                if(in_triang[i] && hyp->GenInHyp.test(i)) {
                    Pyramid_key.push_back(i);
                }
            }

            // now we can store the new pyramid at the right place (or finish the simplicial ones)
            if (recursive && skip_triang) { // mark as "do not triangulate"
                process_pyramid(Pyramid_key, new_generator,store_level,0, recursive,hyp,start_level);
            } else { //default
                process_pyramid(Pyramid_key, new_generator,store_level,-hyp->ValNewGen, recursive,hyp,start_level);
            }
            // interrupt parallel execution if it is really parallel
            // to keep the triangulation and pyramid buffers under control
            if (start_level==0) {
                if (check_evaluation_buffer_size() || Top_Cone->check_pyr_buffer(store_level)) {
                    skip_remaining = true;
                }
            }

#ifndef NCATCH
        } catch(const std::exception& ) {
            tmp_exception = std::current_exception();
            skip_remaining = true;
            #pragma omp flush(skip_remaining)
        }
#endif
    } // end parallel loop over hyperplanes

#ifndef NCATCH
    if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

    if (!omp_in_parallel())
        try_offload(0);

    // empty the buffers while we are on the outermost level
    if (start_level==0 && check_evaluation_buffer_size()) {
        Top_Cone->evaluate_triangulation();
    }

    if (start_level==0 && Top_Cone->check_pyr_buffer(store_level)) {
        Top_Cone->evaluate_stored_pyramids(store_level);
    }

    if (verbose && old_nr_supp_hyps>=RepBound)
        verboseOutput() << endl;

    } while (nr_done < old_nr_supp_hyps);


    evaluate_large_rec_pyramids(new_generator);

}
1166
1167
//---------------------------------------------------------------------------
1168
1169
template<typename Integer>
void Full_Cone<Integer>::process_pyramid(const vector<key_t>& Pyramid_key,
                          const size_t new_generator,const size_t store_level, Integer height, const bool recursive,
                          typename list< FACETDATA >::iterator hyp, size_t start_level){
// processes simplicial pyramids directly, stores other pyramids into their depots

    #pragma omp atomic
    Top_Cone->totalNrPyr++;

    if(Pyramid_key.size()==dim){  // simplicial pyramid completely done here
        #pragma omp atomic        // only for saving memory
        Top_Cone->nrSimplicialPyr++;
        if(recursive){ // the facets may be facets of the mother cone and if recursive==true must be given back
            Matrix<Integer> H(dim,dim);
            Integer dummy_vol;
            Generators.simplex_data(Pyramid_key,H, dummy_vol,false);
            list<FACETDATA> NewFacets;
            FACETDATA NewFacet;
            NewFacet.GenInHyp.resize(nr_gen);
            // facet i of the simplex contains all its generators except the i-th
            for (size_t i=0; i<dim;i++) {
                NewFacet.Hyp = H[i];
                NewFacet.GenInHyp.set();
                NewFacet.GenInHyp.reset(i);
                NewFacet.simplicial=true;
                NewFacet.is_positive_on_all_original_gens=false;
                NewFacet.is_negative_on_some_original_gen=false;
                NewFacets.push_back(NewFacet);
            }
            select_supphyps_from(NewFacets,new_generator,Pyramid_key); // takes itself care of multithreaded_pyramid
        }
        if (height != 0 && (do_triangulation || do_partial_triangulation)) {
            if(multithreaded_pyramid) {
#ifndef NCATCH
                std::exception_ptr tmp_exception;
#endif
                #pragma omp critical(TRIANG)
                {
#ifndef NCATCH
                try{
#endif
                    store_key(Pyramid_key,height,0,TriangulationBuffer);
                    nrTotalComparisons+=dim*dim/2;
#ifndef NCATCH
                } catch(const std::exception& ) {
                    tmp_exception = std::current_exception();
                }
#endif
                } // end critical
#ifndef NCATCH
                if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif
            } else {
                store_key(Pyramid_key,height,0,TriangulationBuffer);
                nrTotalComparisons+=dim*dim/2;
            }
        }
    }
    else {  // non-simplicial

        // heuristic decision whether this pyramid counts as "large"
        bool large=(largePyramidFactor*Comparisons[Pyramid_key.size()-dim] > old_nr_supp_hyps); // Pyramid_key.size()>largePyramidFactor*dim;

        if (!recursive || (large && (do_triangulation || do_partial_triangulation) && height!=0) ) { // must also store for triangulation if recursive and large
            vector<key_t> key_wrt_top(Pyramid_key.size());
            for(size_t i=0;i<Pyramid_key.size();i++)
                key_wrt_top[i]=Top_Key[Pyramid_key[i]];
            #pragma omp critical(STOREPYRAMIDS)
            {
            //      cout << "store_level " << store_level << " large " << large << " pyr level " << pyr_level << endl;
            Top_Cone->Pyramids[store_level].push_back(key_wrt_top);
            Top_Cone->nrPyramids[store_level]++;
            } // critical
            if(!recursive)    // in this case we need only store for future triangulation, and that has been done
                return;
        }
        // now we are in the recursive case and must compute support hyperplanes of the subpyramid
        if(large){  // large recursive pyramid
            if(multithreaded_pyramid){
                #pragma omp critical(LARGERECPYRS)
                LargeRecPyrs.push_back(*hyp);  // LargeRecPyrs are kept and evaluated locally
            }
            else
                LargeRecPyrs.push_back(*hyp);
            return; // done with the large recursive pyramids
        }

        // only recursive small ones left

        Full_Cone<Integer> Pyramid(*this,Pyramid_key);
        Pyramid.Mother = this;
        Pyramid.Mother_Key = Pyramid_key;    // need these data to give back supphyps
        Pyramid.apex=new_generator;
        if (height == 0) { //indicates "do not triangulate"
            Pyramid.do_triangulation = false;
            Pyramid.do_partial_triangulation = false;
            Pyramid.do_Hilbert_basis = false;
            Pyramid.do_deg1_elements=false;
        }

        bool store_for_triangulation=(store_level!=0) //loop in process_pyramids cannot be interrupted
            && (Pyramid.do_triangulation || Pyramid.do_partial_triangulation) // we must (partially) triangulate
            && (start_level!=0 && Top_Cone->TriangulationBufferSize > 2*EvalBoundTriang); // evaluation buffer already full  // EvalBoundTriang

        if (store_for_triangulation) {
            vector<key_t> key_wrt_top(Pyramid_key.size());
            for(size_t i=0;i<Pyramid_key.size();i++)
                key_wrt_top[i]=Top_Key[Pyramid_key[i]];
            #pragma omp critical(STOREPYRAMIDS)
            {
            Top_Cone->Pyramids[store_level].push_back(key_wrt_top);
            Top_Cone->nrPyramids[store_level]++;
            } // critical
            // Now we must suppress immediate triangulation
            Pyramid.do_triangulation = false;
            Pyramid.do_partial_triangulation = false;
            Pyramid.do_Hilbert_basis = false;
            Pyramid.do_deg1_elements=false;
        }

        Pyramid.build_cone();

        if(multithreaded_pyramid){
            #pragma omp atomic
            nrTotalComparisons+=Pyramid.nrTotalComparisons;
        } else
            nrTotalComparisons+=Pyramid.nrTotalComparisons;
    }  // else non-simplicial
}
1296
1297
1298
//---------------------------------------------------------------------------
1299
1300
template<typename Integer>
void Full_Cone<Integer>::find_and_evaluate_start_simplex(){
// finds the start simplex, marks its generators as inserted, stores its facets as
// the initial support hyperplanes, defines the order vector (top cone only) and,
// if required, stores the simplex in the triangulation buffer

    size_t i,j;
    Integer factor;


/*    Simplex<Integer> S = find_start_simplex();
    vector<key_t> key=S.read_key();   // generators indexed from 0 */

    vector<key_t> key=find_start_simplex();
    assert(key.size()==dim); // safety check
    if(verbose){
        verboseOutput() << "Start simplex ";
        for(size_t i=0;i<key.size();++i)
            verboseOutput() << key[i]+1 << " ";
        verboseOutput() << endl;
    }
    Matrix<Integer> H(dim,dim);
    Integer vol;
    Generators.simplex_data(key,H,vol,do_partial_triangulation || do_triangulation);

    // H.pretty_print(cout);


    for (i = 0; i < dim; i++) {
        in_triang[key[i]]=true;
        GensInCone.push_back(key[i]);
        if (deg1_triangulation && isComputed(ConeProperty::Grading))
            deg1_triangulation = (gen_degrees[key[i]] == 1);
    }

    nrGensInCone=dim;

    nrTotalComparisons=dim*dim/2;
    if(using_GMP<Integer>())
        nrTotalComparisons*=GMP_time_factor; // because of the linear algebra involved in this routine
    Comparisons.push_back(nrTotalComparisons);

    // facet i of the start simplex contains all its generators except the i-th
    for (i = 0; i <dim; i++) {
        FACETDATA NewFacet; NewFacet.GenInHyp.resize(nr_gen);
        NewFacet.is_positive_on_all_original_gens=false;
        NewFacet.is_negative_on_some_original_gen=false;
        NewFacet.Hyp=H[i];
        NewFacet.simplicial=true; // indeed, the start simplex is simplicial
        for(j=0;j < dim;j++)
            if(j!=i)
                NewFacet.GenInHyp.set(key[j]);
        NewFacet.ValNewGen=-1;         // must be taken negative since opposite facet
        number_hyperplane(NewFacet,0,0); // created with gen 0
        Facets.push_back(NewFacet);    // was visible before adding this vertex
    }

    if(!is_pyramid){
        //define Order_Vector, decides which facets of the simplices are excluded
        Order_Vector = vector<Integer>(dim,0);
        // Matrix<Integer> G=S.read_generators();
        for(i=0;i<dim;i++){
            factor=(unsigned long) (1+i%10);  // (2*(rand()%(2*dim))+3);
            for(j=0;j<dim;j++)
                Order_Vector[j]+=factor*Generators[key[i]][j];
        }
    }

    //the volume is an upper bound for the height
    if(do_triangulation || (do_partial_triangulation && vol>1))
    {
        store_key(key,vol,1,TriangulationBuffer);
        if(do_only_multiplicity) {
            #pragma omp atomic
            TotDet++;
        }
    } else if (do_partial_triangulation) {
        triangulation_is_partial = true;
    }

    if(do_triangulation){ // we must prepare the sections of the triangulation
        for(i=0;i<dim;i++)
        {
            // GensInCone.push_back(key[i]);  // now done in first loop since always needed
            TriSectionFirst.push_back(TriangulationBuffer.begin());
            TriSectionLast.push_back(TriangulationBuffer.begin());
        }
    }

}
1386
1387
1388
//---------------------------------------------------------------------------
1389
1390
template<typename Integer>
void Full_Cone<Integer>::select_supphyps_from(const list<FACETDATA>& NewFacets,
                    const size_t new_generator, const vector<key_t>& Pyramid_key){
// the mother cone (=this) selects supphyps from the list NewFacets supplied by the daughter
// the daughter provides the necessary information via the parameters

    size_t i;
    // bitset of generators (in mother numbering) that belong to the pyramid
    boost::dynamic_bitset<> in_Pyr(nr_gen);
    for (i=0; i<Pyramid_key.size(); i++) {
        in_Pyr.set(Pyramid_key[i]);
    }
    // the new generator is always the first in the pyramid
    assert(Pyramid_key[0] == new_generator);


    typename list<FACETDATA>::const_iterator pyr_hyp = NewFacets.begin();
    bool new_global_hyp;
    FACETDATA NewFacet;
    NewFacet.is_positive_on_all_original_gens=false;
    NewFacet.is_negative_on_some_original_gen=false;
    NewFacet.GenInHyp.resize(nr_gen);
    Integer test;
    for (; pyr_hyp!=NewFacets.end(); ++pyr_hyp) {
        if(!pyr_hyp->GenInHyp.test(0)) // new gen not in hyp
            continue;
        // a pyramid facet becomes a facet of the mother only if all generators
        // of the mother outside the pyramid lie strictly on its positive side
        new_global_hyp=true;
        for (i=0; i<nr_gen; ++i){
            if(in_Pyr.test(i) || !in_triang[i])
                continue;
            test=v_scalar_product(Generators[i],pyr_hyp->Hyp);
            if(test<=0){
                new_global_hyp=false;
                break;
            }

        }
        if(new_global_hyp){
            NewFacet.Hyp=pyr_hyp->Hyp;
            NewFacet.GenInHyp.reset();
            // size_t gens_in_facet=0;
            // translate the pyramid-local generator bits into mother numbering
            for (i=0; i<Pyramid_key.size(); ++i) {
                if (pyr_hyp->GenInHyp.test(i) && in_triang[Pyramid_key[i]]) {
                    NewFacet.GenInHyp.set(Pyramid_key[i]);
                    // gens_in_facet++;
                }
            }
            /* for (i=0; i<nr_gen; ++i) {
                if (NewFacet.GenInHyp.test(i) && in_triang[i]) {
                    gens_in_facet++;
                }
            }*/
            // gens_in_facet++; // Note: new generator not yet in in_triang
            NewFacet.GenInHyp.set(new_generator);
            NewFacet.simplicial=pyr_hyp->simplicial; // (gens_in_facet==dim-1);
            check_simpliciality_hyperplane(NewFacet);
            number_hyperplane(NewFacet,nrGensInCone,0); //mother unknown
            if(multithreaded_pyramid){
                #pragma omp critical(GIVEBACKHYPS)
                Facets.push_back(NewFacet);
            } else {
                Facets.push_back(NewFacet);
            }
        }
    }
}
1455
1456
//---------------------------------------------------------------------------
1457
template<typename Integer>
1458
void Full_Cone<Integer>::match_neg_hyp_with_pos_hyps(const FACETDATA& hyp, size_t new_generator,list<FACETDATA*>& PosHyps, boost::dynamic_bitset<>& Zero_P){
1459
1460
size_t missing_bound, nr_common_zero;
1461
boost::dynamic_bitset<> common_zero(nr_gen);
1462
vector<key_t> common_key;
1463
common_key.reserve(nr_gen);
1464
vector<key_t> key(nr_gen);
1465
bool common_subfacet;
1466
list<FACETDATA> NewHyp;
1467
size_t subfacet_dim=dim-2;
1468
size_t nr_missing;
1469
typename list<FACETDATA*>::iterator a;
1470
list<FACETDATA> NewHyps;
1471
Matrix<Integer> Test(0,dim);
1472
1473
boost::dynamic_bitset<> zero_hyp=hyp.GenInHyp & Zero_P; // we intersect with the set of gens in positive hyps
1474
1475
vector<int> key_start(nrGensInCone);
1476
size_t nr_zero_hyp=0;
1477
size_t j;
1478
int last_existing=-1;
1479
for(size_t jj=0;jj<nrGensInCone;jj++)
1480
{
1481
j=GensInCone[jj];
1482
if(zero_hyp.test(j)){
1483
key[nr_zero_hyp]=j;
1484
for(size_t kk= last_existing+1;kk<=jj;kk++)
1485
key_start[kk]=nr_zero_hyp;
1486
nr_zero_hyp++;
1487
last_existing= jj;
1488
}
1489
}
1490
if(last_existing< (int)nrGensInCone-1)
1491
for(size_t kk=last_existing+1;kk<nrGensInCone;kk++)
1492
key_start[kk]=nr_zero_hyp;
1493
1494
if (nr_zero_hyp<dim-2)
1495
return;
1496
1497
int tn = omp_get_ancestor_thread_num(omp_start_level+1);
1498
missing_bound=nr_zero_hyp-subfacet_dim; // at most this number of generators can be missing
1499
// to have a chance for common subfacet
1500
1501
typename list< FACETDATA*>::iterator hp_j_iterator=PosHyps.begin();
1502
1503
FACETDATA* hp_j;
1504
1505
for (;hp_j_iterator!=PosHyps.end();++hp_j_iterator){ //match hyp with the given Pos
1506
hp_j=*hp_j_iterator;
1507
1508
if(hyp.Ident==hp_j->Mother || hp_j->Ident==hyp.Mother){ // mother and daughter coming together
1509
// their intersection is a subfacet
1510
add_hyperplane(new_generator,*hp_j,hyp,NewHyps,false); // simplicial set in add_hyperplane
1511
continue;
1512
}
1513
1514
1515
bool extension_test=hyp.BornAt==hp_j->BornAt || (hyp.BornAt<hp_j->BornAt && hp_j->Mother!=0)
1516
|| (hp_j->BornAt<hyp.BornAt && hyp.Mother!=0);
1517
1518
size_t both_existing_from=key_start[max(hyp.BornAt,hp_j->BornAt)];
1519
1520
nr_missing=0;
1521
nr_common_zero=0;
1522
common_key.clear();
1523
size_t second_loop_bound=nr_zero_hyp;
1524
common_subfacet=true;
1525
boost::dynamic_bitset<> common_zero(nr_gen);
1526
1527
if(extension_test){
1528
bool extended=false;
1529
second_loop_bound=both_existing_from;
1530
for(size_t k=both_existing_from;k<nr_zero_hyp;k++){
1531
if(!hp_j->GenInHyp.test(key[k])) {
1532
nr_missing++;
1533
if(nr_missing>missing_bound) {
1534
common_subfacet=false;
1535
break;
1536
}
1537
}
1538
else {
1539
extended=true;
1540
common_key.push_back(key[k]);
1541
common_zero.set(key[k]);
1542
nr_common_zero++;
1543
}
1544
}
1545
1546
if(!extended || !common_subfacet) //
1547
continue;
1548
}
1549
1550
for(size_t k=0;k<second_loop_bound;k++) {
1551
if(!hp_j->GenInHyp.test(key[k])) {
1552
nr_missing++;
1553
if(nr_missing>missing_bound) {
1554
common_subfacet=false;
1555
break;
1556
}
1557
}
1558
else {
1559
common_key.push_back(key[k]);
1560
common_zero.set(key[k]);
1561
nr_common_zero++;
1562
}
1563
}
1564
1565
if(!common_subfacet)
1566
continue;
1567
1568
assert(nr_common_zero >=subfacet_dim);
1569
1570
1571
if (!hp_j->simplicial){
1572
1573
bool ranktest;
1574
/* if(using_GMP<Integer>())
1575
ranktest = (old_nr_supp_hyps > 10*GMP_time_factor*dim*dim*nr_common_zero/3); // in this case the rank computation takes longer
1576
else
1577
ranktest = (old_nr_supp_hyps > 10*dim*dim*nr_common_zero/3); */
1578
1579
ranktest=true;
1580
1581
if(ranktest){
1582
// cout << "Rank" << endl;
1583
Matrix<Integer>& Test = Top_Cone->RankTest[tn];
1584
if(Test.rank_submatrix(Generators,common_key)<subfacet_dim)
1585
common_subfacet=false; // don't make a hyperplane
1586
}
1587
else{ // now the comparison test
1588
// cout << "Compare" << endl;
1589
auto hp_t=Facets.begin();
1590
for (;hp_t!=Facets.end();++hp_t){
1591
if(hp_t->simplicial)
1592
continue;
1593
if ((hp_t->Ident!=hyp.Ident) && (hp_t->Ident!=hp_j->Ident) && common_zero.is_subset_of(hp_t->GenInHyp)) {
1594
common_subfacet=false;
1595
break;
1596
}
1597
}
1598
} // else
1599
} // !simplicial
1600
1601
if(common_subfacet)
1602
add_hyperplane(new_generator,*hp_j,hyp,NewHyps,false); // simplicial set in add_hyperplane
1603
} // for
1604
1605
if(multithreaded_pyramid)
1606
#pragma omp critical(GIVEBACKHYPS)
1607
Facets.splice(Facets.end(),NewHyps);
1608
else
1609
Facets.splice(Facets.end(),NewHyps);
1610
1611
}
1612
1613
//---------------------------------------------------------------------------
1614
template<typename Integer>
1615
void Full_Cone<Integer>::collect_pos_supphyps(list<FACETDATA*>& PosHyps, boost::dynamic_bitset<>& Zero_P, size_t& nr_pos){
1616
1617
// positive facets are collected in a list
1618
1619
typename list<FACETDATA>::iterator ii = Facets.begin();
1620
nr_pos=0;
1621
1622
for (size_t ij=0; ij< old_nr_supp_hyps; ++ij, ++ii)
1623
if (ii->ValNewGen>0) {
1624
Zero_P |= ii->GenInHyp;
1625
PosHyps.push_back(&(*ii));
1626
nr_pos++;
1627
}
1628
}
1629
1630
//---------------------------------------------------------------------------
1631
template<typename Integer>
void Full_Cone<Integer>::evaluate_large_rec_pyramids(size_t new_generator){
// evaluates the stored large recursive pyramids: each base facet collected in
// LargeRecPyrs (a negative hyperplane) is matched in parallel against the
// positive hyperplanes to produce the new support hyperplanes of "this"

    size_t nrLargeRecPyrs=LargeRecPyrs.size();
    if(nrLargeRecPyrs==0)
        return;

    if(verbose)
        verboseOutput() << "large pyramids " << nrLargeRecPyrs << endl;

    list<FACETDATA*> PosHyps;
    boost::dynamic_bitset<> Zero_P(nr_gen);
    size_t nr_pos;
    collect_pos_supphyps(PosHyps,Zero_P,nr_pos);

    nrTotalComparisons+=nr_pos*nrLargeRecPyrs;
#ifndef NCATCH
    std::exception_ptr tmp_exception;
#endif

    const long VERBOSE_STEPS = 50;
    long step_x_size = nrLargeRecPyrs-VERBOSE_STEPS;
    const size_t RepBound=100;

    bool skip_remaining=false;

    #pragma omp parallel
    {
    size_t ppos=0;
    typename list<FACETDATA>::iterator p=LargeRecPyrs.begin();

    #pragma omp for schedule(dynamic)
    for(size_t i=0; i<nrLargeRecPyrs; i++){

        if(skip_remaining)
            continue;

        // move p to position i (list iterators are not random access)
        for(; i > ppos; ++ppos, ++p) ;
        for(; i < ppos; --ppos, --p) ;

        if(verbose && nrLargeRecPyrs>=RepBound){
            #pragma omp critical(VERBOSE)
            while ((long)(i*VERBOSE_STEPS) >= step_x_size) {
                step_x_size += nrLargeRecPyrs;
                verboseOutput() << "." <<flush;
            }
        }

#ifndef NCATCH
        try {
#endif

            INTERRUPT_COMPUTATION_BY_EXCEPTION

            match_neg_hyp_with_pos_hyps(*p,new_generator,PosHyps,Zero_P);
#ifndef NCATCH
        } catch(const std::exception& ) {
            tmp_exception = std::current_exception();
            skip_remaining = true;
            #pragma omp flush(skip_remaining)
        }
#endif
    }
    } // parallel
#ifndef NCATCH
    if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

    if(verbose && nrLargeRecPyrs>=RepBound)
        verboseOutput() << endl;

    LargeRecPyrs.clear();
}
1704
1705
//---------------------------------------------------------------------------
1706
1707
template<typename Integer>
1708
bool Full_Cone<Integer>::check_pyr_buffer(const size_t level){
1709
if(level==0)
1710
return(nrPyramids[0] > EvalBoundLevel0Pyr);
1711
else
1712
return(nrPyramids[level] > EvalBoundPyr);
1713
}
1714
1715
//---------------------------------------------------------------------------
1716
1717
#ifdef NMZ_MIC_OFFLOAD
1718
template<>
1719
void Full_Cone<long long>::try_offload(size_t max_level) {
1720
1721
if (!is_pyramid && _Offload_get_device_number() < 0) // dynamic check for being on CPU (-1)
1722
{
1723
1724
if (max_level >= nrPyramids.size()) max_level = nrPyramids.size()-1;
1725
for (size_t level = 0; level <= max_level; ++level) {
1726
if (nrPyramids[level] >= 100) {
1727
// cout << "XXX: Try offload of level " << level << " pyramids ..." << endl;
1728
mic_offloader.offload_pyramids(*this, level);
1729
break;
1730
}
1731
}
1732
}
1733
}
1734
1735
template<typename Integer>
1736
void Full_Cone<Integer>::try_offload(size_t max_level) {
1737
}
1738
//else it is implemented in the header
1739
1740
1741
template<typename Integer>
1742
void Full_Cone<Integer>::try_offload_loc(long place,size_t max_level){
1743
verboseOutput() << "From place " << place << " " << "level " << max_level << endl;
1744
try_offload(max_level);
1745
}
1746
1747
#endif // NMZ_MIC_OFFLOAD
1748
1749
//---------------------------------------------------------------------------
1750
1751
template<typename Integer>
void Full_Cone<Integer>::evaluate_stored_pyramids(const size_t level){
// evaluates the stored non-recursive pyramids

#ifdef NMZ_MIC_OFFLOAD
    Pyramids_scrambled[level]=false;

    if(level==0 && _Offload_get_device_number() >= 0){
        verboseOutput() << "Start evaluation of " << nrPyramids[level] << " pyrs on level " << level << endl;
        // verboseOutput() << "In parallel " << omp_in_parallel() << endl;
    }
#endif // NMZ_MIC_OFFLOAD

    if(Pyramids[level].empty())
        return;

    assert(omp_get_level()==omp_start_level); // assert(!omp_in_parallel());
    assert(!is_pyramid);

    if (Pyramids.size() < level+2) {
        Pyramids.resize(level+2);      // provide space for a new generation
        nrPyramids.resize(level+2, 0);
        Pyramids_scrambled.resize(level+2, false);
    }

    size_t eval_down_to = 0;

#ifdef NMZ_MIC_OFFLOAD
#ifndef __MIC__
    // only on host and if offload is available: keep a remainder of level-0
    // pyramids in the buffer so that they may be offloaded later
    if (level == 0 && nrPyramids[0] > EvalBoundLevel0Pyr) {
        eval_down_to = EvalBoundLevel0Pyr;
    }
#endif
#endif

    vector<char> Done(nrPyramids[level],0); // marks pyramids already evaluated
    if (verbose) {
        verboseOutput() << "**************************************************" << endl;

        for (size_t l=0; l<=level; ++l) {
            if (nrPyramids[l]>0) {
                verboseOutput() << "level " << l << " pyramids remaining: "
                                << nrPyramids[l] << endl;
            }
        }
        verboseOutput() << "**************************************************" << endl;
    }
    typename list<vector<key_t> >::iterator p;
    size_t ppos;
    bool skip_remaining;
#ifndef NCATCH
    std::exception_ptr tmp_exception;
#endif

    while (nrPyramids[level] > eval_down_to) {

        p = Pyramids[level].begin();
        ppos=0;
        skip_remaining = false;

        #pragma omp parallel for firstprivate(p,ppos) schedule(dynamic)
        for(size_t i=0; i<nrPyramids[level]; i++){
            if (skip_remaining)
                continue;
            // move p to position i (list iterators are not random access)
            for(; i > ppos; ++ppos, ++p) ;
            for(; i < ppos; --ppos, --p) ;

            if(Done[i])
                continue;
            Done[i]=1;

#ifndef NCATCH
            try {
#endif
                INTERRUPT_COMPUTATION_BY_EXCEPTION

                Full_Cone<Integer> Pyramid(*this,*p);
                // Pyramid.recursion_allowed=false;
                Pyramid.do_all_hyperplanes=false;
                if (level>=2 && do_partial_triangulation){ // limits the descent of do_partial_triangulation
                    Pyramid.do_triangulation=true;
                    Pyramid.do_partial_triangulation=false;
                }
                Pyramid.store_level=level+1;
                Pyramid.build_cone();
                if (check_evaluation_buffer_size() || Top_Cone->check_pyr_buffer(level+1)) {
                    // interrupt parallel execution to keep the buffer under control
                    skip_remaining = true;
                }
#ifndef NCATCH
            } catch(const std::exception& ) {
                tmp_exception = std::current_exception();
                skip_remaining = true;
                #pragma omp flush(skip_remaining)
            }
#endif
        } //end parallel for
#ifndef NCATCH
        if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

        // remove done pyramids
        p = Pyramids[level].begin();
        for(size_t i=0; p != Pyramids[level].end(); i++){
            if (Done[i]) {
                p=Pyramids[level].erase(p);
                nrPyramids[level]--;
                Done[i]=0;
            } else {
                ++p;
            }
        }

        try_offload(level+1);

        if (check_evaluation_buffer_size()) {
            if (verbose)
                verboseOutput() << nrPyramids[level] <<
                    " pyramids remaining on level " << level << ", ";
            Top_Cone->evaluate_triangulation();
            try_offload(level+1);
        }

        if (Top_Cone->check_pyr_buffer(level+1)) {
            evaluate_stored_pyramids(level+1);
        }

    } //end while (nrPyramids[level] > eval_down_to)

    if (verbose) {
        verboseOutput() << "**************************************************" << endl;
        verboseOutput() << "all pyramids on level "<< level << " done!"<<endl;
        if (nrPyramids[level+1] == 0) {
            for (size_t l=0; l<=level; ++l) {
                if (nrPyramids[l]>0) {
                    verboseOutput() << "level " << l << " pyramids remaining: "
                                    << nrPyramids[l] << endl;
                }
            }
            verboseOutput() << "**************************************************" << endl;
        }
    }
    if(check_evaluation_buffer())
    {
        Top_Cone->evaluate_triangulation();
    }

    evaluate_stored_pyramids(level+1);
}
1901
1902
1903
1904
//---------------------------------------------------------------------------
1905
1906
/* builds the cone successively by inserting generators */
1907
template<typename Integer>
void Full_Cone<Integer>::build_cone() {
    // Incrementally builds the cone by inserting the generators one by one:
    // the facets (support hyperplanes) are maintained via Fourier-Motzkin
    // elimination and, if requested, the (partial) triangulation is extended.
    // When intermediate data grows too large, the work is delegated to
    // pyramids -- either recursively or stored for later evaluation.

    // if(dim>0){            //correction needed to include the 0 cone;

    // cout << "Pyr " << pyr_level << endl;

    // Threshold for switching to recursive pyramids because of too many
    // support hyperplanes.
    long long RecBoundSuppHyp;
    RecBoundSuppHyp = dim*dim*dim*SuppHypRecursionFactor; //dim^3 * 50
    if(using_GMP<Integer>())
        RecBoundSuppHyp*=GMP_time_factor; // pyramid building is more difficult for complicated arithmetic

    size_t RecBoundTriang=1000000; // if number(supphyps)*size(triang) > RecBoundTriang pass to pyramids
    if(using_GMP<Integer>())
        RecBoundTriang*=GMP_time_factor;

    tri_recursion=false;

    // True when this invocation runs at the outermost parallel level, so that
    // pyramid processing may itself use multiple threads.
    multithreaded_pyramid=(omp_get_level()==omp_start_level);

    size_t nr_original_gen=0;
    size_t steps_in_approximation = 0; // counts generators actually inserted in approximation mode
    if (!is_pyramid && is_approximation)
    {
        // In approximation mode the generators are approximation points of the
        // original generators. Reorder them so that, interleaved over the
        // original generators, points violating many of the current support
        // hyperplanes come first.
        nr_original_gen = OriginalGenerators.nr_of_rows();
        vector<size_t> nr_approx_points; // how many points are in the approximation
        for (size_t j=0;j<nr_original_gen;++j) {
            nr_approx_points.push_back(approx_points_keys[j].size());
        }
        // for every vertex sort the approximation points via: number of positive halfspaces / index
        vector<key_t> overall_perm;
        // stores the perm of every list
        vector<vector<key_t>> local_perms(nr_original_gen);

        for (size_t current_gen = 0 ; current_gen<nr_original_gen;++current_gen){
            vector<key_t> local_perm;
            if (approx_points_keys[current_gen].size()>0){
                auto jt=approx_points_keys[current_gen].begin();
                list<pair<size_t,key_t>> max_halfspace_index_list;
                size_t tmp_hyp=0;
                // TODO: collect only those which belong to the current generator?
                for (;jt!=approx_points_keys[current_gen].end();++jt){
                    // cout << dim << " " << Support_Hyperplanes.nr_of_columns()<< " " << Generators[*jt].size() << endl;
                    tmp_hyp = v_nr_negative(Support_Hyperplanes.MxV(Generators[*jt])); // nr of negative halfspaces
                    max_halfspace_index_list.insert(max_halfspace_index_list.end(),make_pair(tmp_hyp,*jt));
                }
                // sort descending by the number of violated hyperplanes
                max_halfspace_index_list.sort([](const pair<size_t,key_t> &left, const pair<size_t,key_t> &right) {
                    return right.first < left.first;
                });
                auto list_it = max_halfspace_index_list.begin();
                for(;list_it!=max_halfspace_index_list.end();++list_it){
                    local_perm.push_back(list_it->second);
                }
            }
            local_perms[current_gen]=local_perm;
        }
        // concatenate the permutations, round-robin over the original generators
        size_t local_perm_counter=0;
        bool not_done = true;
        while (not_done){
            not_done=false;
            for (size_t current_gen=0;current_gen<nr_original_gen;++current_gen){
                if (local_perm_counter<nr_approx_points[current_gen]){
                    not_done=true;
                    overall_perm.push_back(local_perms[current_gen][local_perm_counter]);
                }
            }
            ++local_perm_counter;
        }
        assert(overall_perm.size()==nr_gen);
        // sort the generators according to the permutations
        Generators.order_rows_by_perm(overall_perm);
    }

    // Initialize the counters that hand out unique facet identifiers
    // (one counter per thread in the multithreaded case).
    if(!use_existing_facets){
        if(multithreaded_pyramid){
            HypCounter.resize(omp_get_max_threads());
            for(size_t i=0;i<HypCounter.size();++i)
                HypCounter[i]=i+1;
        } else{
            HypCounter.resize(1);
            HypCounter[0]=1;
        }

        find_and_evaluate_start_simplex();
    }

    size_t last_to_be_inserted; // good to know in case of do_all_hyperplanes==false
    last_to_be_inserted=nr_gen-1; // because we don't need to compute support hyperplanes in this case
    for(int j=nr_gen-1;j>=0;--j){
        if(!in_triang[j]){
            last_to_be_inserted=j;
            break;
        }
    } // last_to_be_inserted now determined

    bool is_new_generator;
    typename list< FACETDATA >::iterator l;

    // Approximation mode: whether the containment test for the original
    // generators still has to be run (enables the early break below).
    bool check_original_gens=true;

    for (size_t i=start_from;i<nr_gen;++i) {

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        time_t start,end;
        time (&start);

        start_from=i;

        if (in_triang[i])
            continue;

        if (!is_pyramid && is_approximation) steps_in_approximation++;
        // we check whether all original generators are contained in the current cone
        if (!is_pyramid && is_approximation && check_original_gens){
            if (verbose)
                verboseOutput() << "Check...";
            size_t current_gen=0;
            l=Facets.begin();
            for (;l!=Facets.end();++l){
                if (l->is_positive_on_all_original_gens) continue;
                for (current_gen=0;current_gen<nr_original_gen;++current_gen){
                    if (!v_scalar_product_nonnegative(l->Hyp,OriginalGenerators[current_gen])) {
                        l->is_negative_on_some_original_gen=true;
                        check_original_gens=false;
                        break;
                    }
                }
                if (current_gen==nr_original_gen){
                    l->is_positive_on_all_original_gens=true;
                } else {
                    break;
                }
            }
            if(verbose)
                verboseOutput() << " done." << endl;
            // now we need to stop
            if (l==Facets.end()){
                if(verbose)
                    verboseOutput() << "The original cone is now contained." << endl;
                break;
            }
        }

        if(do_triangulation && TriangulationBufferSize > 2*RecBoundTriang) // emergency brake
            tri_recursion=true; // to switch off production of simplices in favor
                                // of non-recursive pyramids
        Integer scalar_product;
        is_new_generator=false;
        l=Facets.begin();
        old_nr_supp_hyps=Facets.size(); // Facets will be extended in the loop

        long long nr_pos=0, nr_neg=0;
        long long nr_neg_simp=0, nr_pos_simp=0;
        vector<Integer> L;
#ifndef NCATCH
        std::exception_ptr tmp_exception;
#endif

        // Evaluate the new generator on all current support hyperplanes in
        // parallel. The list iterator l is advanced per thread to position k.
        size_t lpos=0;
        #pragma omp parallel for private(L,scalar_product) firstprivate(lpos,l) reduction(+: nr_pos, nr_neg)
        for (size_t k=0; k<old_nr_supp_hyps; k++) {
#ifndef NCATCH
            try {
#endif
                for(;k > lpos; lpos++, l++) ;
                for(;k < lpos; lpos--, l--) ;

                L=Generators[i];
                scalar_product=v_scalar_product(L,(*l).Hyp);
                l->ValNewGen=scalar_product;
                if (scalar_product<0) {
                    is_new_generator=true; // generator lies outside at least one facet
                    nr_neg++;
                    if(l->simplicial)
                        #pragma omp atomic
                        nr_neg_simp++;
                }
                if (scalar_product>0) {
                    nr_pos++;
                    if(l->simplicial)
                        #pragma omp atomic
                        nr_pos_simp++;
                }
#ifndef NCATCH
            } catch(const std::exception& ) {
                tmp_exception = std::current_exception();
            }
#endif
        } //end parallel for
#ifndef NCATCH
        if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

        if(!is_new_generator)
            continue; // generator already inside the current cone

        // the i-th generator is used in the triangulation
        // in_triang[i]=true; // now at end of loop
        if (deg1_triangulation && isComputed(ConeProperty::Grading))
            deg1_triangulation = (gen_degrees[i] == 1);

        if (!omp_in_parallel())
            try_offload(0);
        /* if(!is_pyramid && verbose )
            verboseOutput() << "Neg " << nr_neg << " Pos " << nr_pos << " NegSimp " <<nr_neg_simp << " PosSimp " <<nr_pos_simp << endl; */

        // First we test whether to go to recursive pyramids because of too many supphyps
        if (recursion_allowed && nr_neg*nr_pos-(nr_neg_simp*nr_pos_simp) > RecBoundSuppHyp) { // use pyramids because of supphyps
            if(!is_pyramid && verbose )
                verboseOutput() << "Building pyramids" << endl;
            if (do_triangulation)
                tri_recursion = true; // We can not go back to classical triangulation
            if(check_evaluation_buffer()){
                Top_Cone->evaluate_triangulation();
            }

            process_pyramids(i,true); //recursive
            lastGen=i;
            nextGen=i+1;
        }
        else{ // now we check whether to go to pyramids because of the size of triangulation
              // once we have done so, we must stay with it
            if( tri_recursion || (do_triangulation
                && (nr_neg*TriangulationBufferSize > RecBoundTriang
                    || 3*omp_get_max_threads()*TriangulationBufferSize>EvalBoundTriang ))){ // go to pyramids because of triangulation
                if(check_evaluation_buffer()){
                    Top_Cone->evaluate_triangulation();
                }
                tri_recursion=true;
                process_pyramids(i,false); //non-recursive
            }
            else{ // no pyramids necessary
                if(do_partial_triangulation)
                    process_pyramids(i,false); // non-recursive
                if(do_triangulation)
                    extend_triangulation(i);
            }

            if(do_all_hyperplanes || i!=last_to_be_inserted)
                find_new_facets(i);
        }
        size_t nr_new_facets = Facets.size() - old_nr_supp_hyps;
        time (&end);
        /* double dif = difftime (end,start);

        if (verbose) {
            verboseOutput() << "Generator took " << dif << " sec " <<endl;
        }*/

        // removing the negative hyperplanes if necessary
        if(do_all_hyperplanes || i!=last_to_be_inserted){
            l=Facets.begin();
            for (size_t j=0; j<old_nr_supp_hyps;j++){
                if (l->ValNewGen<0) {
                    if (is_approximation && l->is_negative_on_some_original_gen){
                        // a facet separating an original generator vanished:
                        // the containment test must be rerun
                        check_original_gens = true;
                    }
                    l=Facets.erase(l);
                }
                else
                    ++l;
            }
        }

        GensInCone.push_back(i);
        nrGensInCone++;

        Comparisons.push_back(nrTotalComparisons);

        if(verbose) {
            verboseOutput() << "gen="<< i+1 <<", ";
            if (do_all_hyperplanes || i!=last_to_be_inserted) {
                verboseOutput() << Facets.size()<<" hyp, " << nr_new_facets << " new";
            } else {
                verboseOutput() << Support_Hyperplanes.nr_of_rows()<<" hyp";
            }
            if(nrPyramids[0]>0)
                verboseOutput() << ", " << nrPyramids[0] << " pyr";
            if(do_triangulation||do_partial_triangulation)
                verboseOutput() << ", " << TriangulationBufferSize << " simpl";
            verboseOutput()<< endl;
        }

        in_triang[i]=true;

    } // loop over i

    start_from=nr_gen;

    if (is_pyramid && do_all_hyperplanes) // must give supphyps back to mother
        Mother->select_supphyps_from(Facets, apex, Mother_Key);

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    // transfer Facets --> SupportHyperplanes
    if (do_all_hyperplanes) {
        nrSupport_Hyperplanes = Facets.size();
        Support_Hyperplanes = Matrix<Integer>(nrSupport_Hyperplanes,0);
        typename list<FACETDATA>::iterator IHV=Facets.begin();
        for (size_t i=0; i<nrSupport_Hyperplanes; ++i, ++IHV) {
            swap(Support_Hyperplanes[i],IHV->Hyp);
        }
        is_Computed.set(ConeProperty::SupportHyperplanes);
    }
    Support_Hyperplanes.set_nr_of_columns(dim);


    if(do_extreme_rays && do_all_hyperplanes)
        compute_extreme_rays(true);

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    transfer_triangulation_to_top(); // transfer remaining simplices to top
    if(check_evaluation_buffer()){
        Top_Cone->evaluate_triangulation();
    }

    if (!is_pyramid && is_approximation && verbose){
        verboseOutput() << "Performed " << steps_in_approximation << "/" << nr_gen << " steps." << endl;
    }
    // } // end if (dim>0)

    Facets.clear();

}
2232
2233
//---------------------------------------------------------------------------
2234
2235
template<typename Integer>
void Full_Cone<Integer>::find_bottom_facets() {
    // Computes the bottom decomposition of the cone: the generators are lifted
    // to height 1, the lower facets of the resulting polyhedron are found via
    // an auxiliary Full_Cone in dimension dim+1, and each bottom facet is
    // stored as a level 0 pyramid for later processing.

    if(verbose)
        verboseOutput() << "Computing bottom decomposition" << endl;

    // Order vector: a generic positive combination of a start simplex,
    // with small varying weights to avoid lying on a hyperplane.
    vector<key_t> start_simpl=Generators.max_rank_submatrix_lex();
    Order_Vector = vector<Integer>(dim,0);
    for(size_t i=0;i<dim;++i)
        for(size_t j=0;j<dim;++j)
            Order_Vector[j]+=((unsigned long) (1+i%10))*Generators[start_simpl[i]][j];

    // First the generators for the recession cone = our cone
    Matrix<Integer> BottomGen(0,dim+1);
    vector<Integer> help(dim+1);
    for(size_t i=0;i<nr_gen;++i){
        for(size_t j=0;j<dim; ++j)
            help[j]=Generators[i][j];
        help[dim]=0;
        BottomGen.append(help);
    }
    // then the same vectors as generators of the bottom polyhedron
    for(size_t i=0;i<nr_gen;++i){
        for(size_t j=0;j<dim; ++j)
            help[j]=Generators[i][j];
        help[dim]=1;
        BottomGen.append(help);
    }

    Full_Cone BottomPolyhedron(BottomGen);
    BottomPolyhedron.verbose=verbose;
    BottomPolyhedron.do_extreme_rays=true;
    BottomPolyhedron.keep_order = true;
    try {
        BottomPolyhedron.dualize_cone(); // includes finding extreme rays
    } catch(const NonpointedException& ){};

    // transfer pointedness
    assert( BottomPolyhedron.isComputed(ConeProperty::IsPointed) );
    pointed = BottomPolyhedron.pointed;
    is_Computed.set(ConeProperty::IsPointed);

    // BottomPolyhedron.Support_Hyperplanes.pretty_print(cout);

    help.resize(dim);

    // find extreme rays of Bottom among the generators
    // (the second copy of the generators, at height 1, has indices
    // nr_gen..2*nr_gen-1 in BottomPolyhedron)
    vector<key_t> BottomExtRays;
    for(size_t i=0;i<nr_gen;++i)
        if(BottomPolyhedron.Extreme_Rays_Ind[i+nr_gen])
            BottomExtRays.push_back(i);
    /* vector<key_t> BottomExtRays; // can be used if the bool vector should not exist anymore
    size_t start_search=0;
    for(size_t i=0;i<ExtStrahl.nr_of_rows();++i){
        if(BottomPolyhedron.ExtStrahl[i][dim]==1){
            BottomPolyhedron.ExtStrahl[i].resize(dim);
            for(size_t j=0;j<nr_gen;++j){
                size_t k=(j+start_search) % nr_gen;
                if(BottomPolyhedron.ExtStrahl[i]==Generators[k]){
                    BottomExtRays.push_back(k);
                    start_search++;
                }
            }
        }
    }*/

    if(verbose)
        verboseOutput() << "Bottom has " << BottomExtRays.size() << " extreme rays" << endl;

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    // Sort the support hyperplanes of the lifted polyhedron by the sign of
    // their last coordinate: 0 gives a support hyperplane of our cone,
    // negative gives a bottom facet.
    Matrix<Integer> BottomFacets(0,dim);
    vector<Integer> BottomDegs(0,dim);
    if (!isComputed(ConeProperty::SupportHyperplanes)) {
        Support_Hyperplanes = Matrix<Integer>(0,dim);
        nrSupport_Hyperplanes=0;
    }
    for(size_t i=0;i<BottomPolyhedron.nrSupport_Hyperplanes;++i){
        Integer test=BottomPolyhedron.Support_Hyperplanes[i][dim];
        for(size_t j=0;j<dim;++j)
            help[j]=BottomPolyhedron.Support_Hyperplanes[i][j];
        if(test==0 && !isComputed(ConeProperty::SupportHyperplanes)){
            Support_Hyperplanes.append(help);
            nrSupport_Hyperplanes++;
        }
        if (test < 0){
            BottomFacets.append(help);
            BottomDegs.push_back(-test);
        }
    }

    is_Computed.set(ConeProperty::SupportHyperplanes);

    if (!pointed)
        throw NonpointedException();

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    // A bottom extreme ray belongs to a bottom facet iff its scalar product
    // with the facet normal equals the facet's degree.
    vector<key_t> facet;
    for(size_t i=0;i<BottomFacets.nr_of_rows();++i){
        facet.clear();
        for(size_t k=0;k<BottomExtRays.size();++k)
            if(v_scalar_product(Generators[BottomExtRays[k]],BottomFacets[i])==BottomDegs[i])
                facet.push_back(BottomExtRays[k]);
        Pyramids[0].push_back(facet);
        nrPyramids[0]++;
    }
    if(verbose)
        verboseOutput() << "Bottom decomposition computed, " << nrPyramids[0] << " subcones" << endl;
}
2345
2346
template<typename Integer>
2347
void Full_Cone<Integer>::start_message() {
2348
2349
if (verbose) {
2350
verboseOutput()<<"************************************************************"<<endl;
2351
verboseOutput()<<"starting primal algorithm ";
2352
if (do_partial_triangulation) verboseOutput()<<"with partial triangulation ";
2353
if (do_triangulation) {
2354
verboseOutput()<<"with full triangulation ";
2355
}
2356
if (!do_triangulation && !do_partial_triangulation) verboseOutput()<<"(only support hyperplanes) ";
2357
verboseOutput()<<"..."<<endl;
2358
}
2359
}
2360
2361
template<typename Integer>
2362
void Full_Cone<Integer>::end_message() {
2363
2364
if (verbose) {
2365
verboseOutput() << "------------------------------------------------------------"<<endl;
2366
}
2367
}
2368
2369
2370
//---------------------------------------------------------------------------
2371
2372
template<typename Integer>
void Full_Cone<Integer>::build_top_cone() {
    // Entry point for building the top (non-pyramid) cone. Either builds the
    // cone directly, or computes a bottom decomposition first and evaluates
    // the resulting subcones as stored level 0 pyramids.

    OldCandidates.verbose=verbose;
    NewCandidates.verbose=verbose;

    if(dim==0)
        return;

    if( ( !do_bottom_dec || deg1_generated || dim==1 || (!do_triangulation && !do_partial_triangulation))) {
        build_cone();
    }
    else{
        find_bottom_facets();
        start_from=nr_gen;
        deg1_triangulation=false;

        // Randomly shuffle the level 0 pyramids (the subcones of the bottom
        // decomposition) to balance the load over the threads.
        vector<list<vector<key_t> >::iterator > level0_order;
        level0_order.reserve(nrPyramids[0]);
        auto p=Pyramids[0].begin();
        for(size_t k=0;k<nrPyramids[0];++k,++p){
            level0_order.push_back(p);
        }
        for(size_t k=0;k<5*nrPyramids[0];++k){
            swap(level0_order[rand()%nrPyramids[0]],level0_order[rand()%nrPyramids[0]]);
        }
        list<vector<key_t> > new_order;
        for(size_t k=0;k<nrPyramids[0];++k){
            new_order.push_back(*level0_order[k]);
        }
        Pyramids[0].clear();
        Pyramids[0].splice(Pyramids[0].begin(),new_order);

    }

    // try_offload(0); // superfluous since tried immediately in evaluate_stored_pyramids(0)

    evaluate_stored_pyramids(0); // force evaluation of remaining pyramids

#ifdef NMZ_MIC_OFFLOAD
    if (_Offload_get_device_number() < 0) // dynamic check for being on CPU (-1)
    {
        evaluate_stored_pyramids(0); // previous run could have left over pyramids
        mic_offloader.evaluate_triangulation();
    }
#endif // NMZ_MIC_OFFLOAD

}
2420
2421
2422
//---------------------------------------------------------------------------
2423
2424
template<typename Integer>
2425
bool Full_Cone<Integer>::check_evaluation_buffer(){
2426
2427
return(omp_get_level()==omp_start_level && check_evaluation_buffer_size());
2428
}
2429
2430
//---------------------------------------------------------------------------
2431
2432
template<typename Integer>
2433
bool Full_Cone<Integer>::check_evaluation_buffer_size(){
2434
2435
return(!Top_Cone->keep_triangulation &&
2436
Top_Cone->TriangulationBufferSize > EvalBoundTriang);
2437
}
2438
2439
//---------------------------------------------------------------------------
2440
2441
template<typename Integer>
void Full_Cone<Integer>::transfer_triangulation_to_top(){
    // Moves the simplices collected in this cone's TriangulationBuffer to the
    // top cone, translating generator keys into top-cone numbering. Called on
    // the top cone itself it only triggers an evaluation when the buffer is
    // full.

    size_t i;

    // cout << "Pyr level " << pyr_level << endl;

    if(!is_pyramid) { // we are in top cone
        if(check_evaluation_buffer()){
            evaluate_triangulation();
        }
        return; // no transfer necessary
    }

    // now we are in a pyramid

    // cout << "In pyramid " << endl;
    // tn = index of the ancestor thread at the outermost parallel level,
    // used to pick the matching free-simplex list of the top cone.
    int tn = 0;
    if (omp_in_parallel())
        tn = omp_get_ancestor_thread_num(omp_start_level+1);

    auto pyr_simp=TriangulationBuffer.begin();
    while (pyr_simp!=TriangulationBuffer.end()) {
        if (pyr_simp->height == 0) { // it was marked to be skipped
            // recycle the simplex into the top cone's free list
            Top_Cone->FS[tn].splice(Top_Cone->FS[tn].end(), TriangulationBuffer, pyr_simp++);
            --TriangulationBufferSize;
        } else {
            for (i=0; i<dim; i++) // adjust key to topcone generators
                pyr_simp->key[i]=Top_Key[pyr_simp->key[i]];
            sort(pyr_simp->key.begin(),pyr_simp->key.end());
            ++pyr_simp;
        }
    }

    // cout << "Keys transferred " << endl;
    // Several pyramids may transfer concurrently, hence the critical section.
    #pragma omp critical(TRIANG)
    {
        Top_Cone->TriangulationBuffer.splice(Top_Cone->TriangulationBuffer.end(),TriangulationBuffer);
        Top_Cone->TriangulationBufferSize += TriangulationBufferSize;
    }
    TriangulationBufferSize = 0;

}
2484
2485
//---------------------------------------------------------------------------
2486
template<typename Integer>
void Full_Cone<Integer>::get_supphyps_from_copy(bool from_scratch){
    // Computes the support hyperplanes on a copy of this cone so that the
    // state of the ongoing primal computation is not disturbed. If
    // from_scratch is false, the copy resumes from the facets and bookkeeping
    // data already accumulated here instead of starting over.

    if(isComputed(ConeProperty::SupportHyperplanes)) // we have them already
        return;

    Full_Cone copy((*this).Generators);
    copy.verbose=verbose;
    if(!from_scratch){
        // transfer the current state of the incremental construction
        copy.start_from=start_from;
        copy.use_existing_facets=true;
        copy.keep_order=true;
        copy.HypCounter=HypCounter;
        copy.Extreme_Rays_Ind=Extreme_Rays_Ind;
        copy.in_triang=in_triang;
        copy.old_nr_supp_hyps=old_nr_supp_hyps;
        if(isComputed(ConeProperty::ExtremeRays))
            copy.is_Computed.set(ConeProperty::ExtremeRays);
        copy.GensInCone=GensInCone;
        copy.nrGensInCone=nrGensInCone;
        copy.Comparisons=Comparisons;
        if(!Comparisons.empty())
            copy.nrTotalComparisons=Comparisons[Comparisons.size()-1];

        typename list< FACETDATA >::const_iterator l=Facets.begin();

        // hand over only the facets valid at the restart point
        for(size_t i=0;i<old_nr_supp_hyps;++i){
            copy.Facets.push_back(*l);
            ++l;
        }
    }

    copy.dualize_cone();

    // harvest the results from the copy
    std::swap(Support_Hyperplanes,copy.Support_Hyperplanes);
    nrSupport_Hyperplanes = copy.nrSupport_Hyperplanes;
    is_Computed.set(ConeProperty::SupportHyperplanes);
    do_all_hyperplanes = false;
}
2525
2526
2527
//---------------------------------------------------------------------------
2528
2529
template<typename Integer>
2530
void Full_Cone<Integer>::update_reducers(bool forced){
2531
2532
if((!do_Hilbert_basis || do_module_gens_intcl) && !forced)
2533
return;
2534
2535
if(NewCandidates.Candidates.empty())
2536
return;
2537
2538
INTERRUPT_COMPUTATION_BY_EXCEPTION
2539
2540
if(nr_gen==dim) // no global reduction in the simplicial case
2541
NewCandidates.sort_by_deg();
2542
if(nr_gen!=dim || forced){ // global reduction in the nonsimplicial case (or forced)
2543
NewCandidates.auto_reduce();
2544
if(verbose){
2545
verboseOutput() << "reducing " << OldCandidates.Candidates.size() << " candidates by " << NewCandidates.Candidates.size() << " reducers" << endl;
2546
}
2547
OldCandidates.reduce_by(NewCandidates);
2548
}
2549
OldCandidates.merge(NewCandidates);
2550
CandidatesSize=OldCandidates.Candidates.size();
2551
}
2552
2553
//---------------------------------------------------------------------------
2554
2555
template<typename Integer>
void Full_Cone<Integer>::prepare_old_candidates_and_support_hyperplanes(){
    // Sets up reduction for the Hilbert basis computation: ensures that
    // support hyperplanes are available, derives the reduction bound from the
    // available RAM, and seeds OldCandidates with the original generators.

    if(!isComputed(ConeProperty::SupportHyperplanes)){
        if (verbose) {
            verboseOutput() << "**** Computing support hyperplanes for reduction:" << endl;
        }
        get_supphyps_from_copy(false);
    }

    check_pointed();
    if(!pointed){
        throw NonpointedException();
    }

    // choose the reduction bound depending on the available memory
    int max_threads = omp_get_max_threads();
    size_t Memory_per_gen=8*nrSupport_Hyperplanes;
    size_t max_nr_gen=RAM_Size/(Memory_per_gen*max_threads);
    // cout << "max_nr_gen " << max_nr_gen << endl;
    AdjustedReductionBound=max_nr_gen;
    if(AdjustedReductionBound < 2000)
        AdjustedReductionBound=2000;


    Sorting=compute_degree_function();
    if (!is_approximation) {
        bool save_do_module_gens_intcl=do_module_gens_intcl;
        do_module_gens_intcl=false; // to avoid multiplying sort_deg by 2 for the original generators
        for (size_t i = 0; i <nr_gen; i++) {
            // cout << gen_levels[i] << " ** " << Generators[i];
            // In the inhomogeneous case only generators of level 0 (or <=1
            // without module generator computation) qualify as candidates.
            if(!inhomogeneous || gen_levels[i]==0 || (!save_do_module_gens_intcl && gen_levels[i]<=1)) {
                OldCandidates.Candidates.push_back(Candidate<Integer>(Generators[i],*this));
                OldCandidates.Candidates.back().original_generator=true;
            }
        }
        do_module_gens_intcl=save_do_module_gens_intcl; // restore
        if(!do_module_gens_intcl) // if do_module_gens_intcl we don't want to change the original monoid
            OldCandidates.auto_reduce();
        else
            OldCandidates.sort_by_deg();
    }
}
2597
2598
//---------------------------------------------------------------------------
2599
2600
template<typename Integer>
void Full_Cone<Integer>::evaluate_triangulation(){
    // Evaluates all simplices accumulated in TriangulationBuffer in parallel
    // (volumes, Hilbert basis candidates, h-vector data, ...). Simplices too
    // large for the standard evaluator are pushed onto LargeSimplices. The
    // evaluation loop pauses to reduce candidates whenever a thread exceeds
    // AdjustedReductionBound, then resumes with the remaining simplices.

    // prepare reduction
    if (do_Hilbert_basis && OldCandidates.Candidates.empty()) {
        prepare_old_candidates_and_support_hyperplanes();
    }

    if (TriangulationBufferSize == 0)
        return;

    assert(omp_get_level()==omp_start_level);

    const long VERBOSE_STEPS = 50;
    long step_x_size = TriangulationBufferSize-VERBOSE_STEPS;
    if (verbose) {
        verboseOutput() << "evaluating "<<TriangulationBufferSize<<" simplices" <<endl;
        /* verboseOutput() << "---------+---------+---------+---------+---------+"
                        << " (one | per 2%)" << endl;*/
    }

    totalNrSimplices += TriangulationBufferSize;

    if(do_Stanley_dec || keep_triangulation){ // in these cases sorting is necessary
        auto simp=TriangulationBuffer.begin();
        for(;simp!=TriangulationBuffer.end();++simp)
            sort(simp->key.begin(),simp->key.end());
    }

    if(do_evaluation && !do_only_multiplicity) {

        // done[] records which buffer positions have been processed, so the
        // loop can be rerun after an interruption for candidate reduction.
        deque<bool> done(TriangulationBufferSize,false);
        bool skip_remaining;
#ifndef NCATCH
        std::exception_ptr tmp_exception;
#endif

        do{ // allows multiple run of loop below in case of interruption for the update of reducers

            skip_remaining=false;
            step_x_size = TriangulationBufferSize-VERBOSE_STEPS;

            #pragma omp parallel
            {
                typename list< SHORTSIMPLEX<Integer> >::iterator s = TriangulationBuffer.begin();
                size_t spos=0;
                int tn = omp_get_thread_num();
                #pragma omp for schedule(dynamic) nowait
                for(size_t i=0; i<TriangulationBufferSize; i++){
#ifndef NCATCH
                    try {
#endif
                        if(skip_remaining)
                            continue;

                        // move the thread-local list iterator to position i
                        for(; i > spos; ++spos, ++s) ;
                        for(; i < spos; --spos, --s) ;

                        INTERRUPT_COMPUTATION_BY_EXCEPTION

                        if(done[spos])
                            continue;

                        done[spos]=true;

                        /* if(keep_triangulation || do_Stanley_dec) -- now done above
                            sort(s->key.begin(),s->key.end()); */
                        // evaluate() returns false for simplices that are too
                        // large; those are deferred to LargeSimplices
                        if(!SimplexEval[tn].evaluate(*s)){
                            #pragma omp critical(LARGESIMPLEX)
                            LargeSimplices.push_back(SimplexEval[tn]);
                        }
                        if (verbose) {
                            #pragma omp critical(VERBOSE)
                            while ((long)(i*VERBOSE_STEPS) >= step_x_size) {
                                step_x_size += TriangulationBufferSize;
                                verboseOutput() << "|" <<flush;
                            }
                        }

                        // pause the loop to reduce the collected candidates
                        if(do_Hilbert_basis && Results[tn].get_collected_elements_size() > AdjustedReductionBound)
                            skip_remaining=true;
#ifndef NCATCH
                    } catch(const std::exception& ) {
                        tmp_exception = std::current_exception();
                        skip_remaining = true;
                        #pragma omp flush(skip_remaining)
                    }
#endif
                }
                Results[tn].transfer_candidates();
            } // end parallel
#ifndef NCATCH
            if (!(tmp_exception == 0)) std::rethrow_exception(tmp_exception);
#endif

            if (verbose)
                verboseOutput() << endl;

            update_reducers();

        } while(skip_remaining);

    } // do_evaluation

    if (verbose)
    {
        verboseOutput() << totalNrSimplices << " simplices";
        if(do_Hilbert_basis)
            verboseOutput() << ", " << CandidatesSize << " HB candidates";
        if(do_deg1_elements)
            verboseOutput() << ", " << CandidatesSize << " deg1 vectors";
        verboseOutput() << " accumulated." << endl;
    }

    // either archive the simplices or recycle their storage
    if (keep_triangulation) {
        Triangulation.splice(Triangulation.end(),TriangulationBuffer);
    } else {
        // #pragma omp critical(FREESIMPL)
        FreeSimpl.splice(FreeSimpl.begin(),TriangulationBuffer);
    }
    TriangulationBufferSize=0;

    if (verbose && use_bottom_points) {
        size_t lss=LargeSimplices.size();
        if(lss>0)
            verboseOutput() << lss << " large simplices stored" << endl;
    }

    for(size_t i=0;i<Results.size();++i)
        Results[i].transfer_candidates(); // any remaining ones

    update_reducers();
}
2733
2734
//---------------------------------------------------------------------------
2735
2736
template<typename Integer>
2737
void Full_Cone<Integer>::evaluate_large_simplices(){
2738
2739
size_t lss = LargeSimplices.size();
2740
if (lss == 0)
2741
return;
2742
2743
assert(omp_get_level()==omp_start_level);
2744
2745
if (verbose) {
2746
verboseOutput() << "Evaluating " << lss << " large simplices" << endl;
2747
}
2748
size_t j;
2749
for (j = 0; j < lss; ++j) {
2750
2751
INTERRUPT_COMPUTATION_BY_EXCEPTION
2752
2753
evaluate_large_simplex(j, lss);
2754
}
2755
2756
// decomposition might have created new simplices -- NO LONGER, now in Pyramids[0]
2757
// evaluate_triangulation();
2758
2759
// also new large simplices are possible
2760
/* if (!LargeSimplices.empty()) {
2761
use_bottom_points = false;
2762
lss += LargeSimplices.size();
2763
if (verbose) {
2764
verboseOutput() << "Continue evaluation of " << lss << " large simplices without new decompositions of simplicial cones." << endl;
2765
}
2766
for (; j < lss; ++j) {
2767
2768
INTERRUPT_COMPUTATION_BY_EXCEPTION
2769
2770
evaluate_large_simplex(j, lss);
2771
}
2772
}*/
2773
assert(LargeSimplices.empty());
2774
2775
for(size_t i=0;i<Results.size();++i)
2776
Results[i].transfer_candidates(); // any remaining ones
2777
2778
update_reducers();
2779
}
2780
2781
//---------------------------------------------------------------------------
2782
2783
template<typename Integer>
2784
void Full_Cone<Integer>::evaluate_large_simplex(size_t j, size_t lss) {
2785
if (verbose) {
2786
verboseOutput() << "Large simplex " << j+1 << " / " << lss << endl;
2787
}
2788
2789
if (do_deg1_elements && !do_h_vector && !do_Stanley_dec && !deg1_triangulation) {
2790
compute_deg1_elements_via_projection_simplicial(LargeSimplices.front().get_key());
2791
}
2792
else {
2793
LargeSimplices.front().Simplex_parallel_evaluation();
2794
if (do_Hilbert_basis && Results[0].get_collected_elements_size() > AdjustedReductionBound) {
2795
Results[0].transfer_candidates();
2796
update_reducers();
2797
}
2798
}
2799
LargeSimplices.pop_front();
2800
}
2801
2802
//---------------------------------------------------------------------------
2803
2804
template<typename Integer>
2805
void Full_Cone<Integer>::compute_deg1_elements_via_projection_simplicial(const vector<key_t>& key){
2806
2807
/* Full_Cone<Integer> SimplCone(Generators.submatrix(key));
2808
SimplCone.verbose=false; // verbose;
2809
SimplCone.Grading=Grading;
2810
SimplCone.is_Computed.set(ConeProperty::Grading);
2811
SimplCone.do_deg1_elements=true;
2812
SimplCone.do_approximation=true;
2813
2814
SimplCone.compute();*/
2815
Matrix<Integer> Gens=Generators.submatrix(key);
2816
Matrix<Integer> GradMat(0,dim);
2817
GradMat.append(Grading);
2818
Cone<Integer> ProjCone(Type::cone,Gens,Type::grading, GradMat);
2819
ProjCone.compute(ConeProperty::Projection);
2820
vector<vector<Integer> > Deg1=ProjCone.getDeg1Elements();
2821
Matrix<Integer> Supp=ProjCone.getSupportHyperplanesMatrix();;
2822
2823
vector<bool> Excluded(dim,false); // we want to discard duplicates
2824
for(size_t i=0;i<dim;++i){
2825
Integer test=v_scalar_product(Supp[i],Order_Vector);
2826
if(test>0)
2827
continue;
2828
if(test<0){
2829
Excluded[i]=true;
2830
continue;
2831
}
2832
size_t j;
2833
for(j=0;j<dim;++j){
2834
if(Supp[i][j]!=0)
2835
break;
2836
}
2837
if(Supp[i][j]<0)
2838
Excluded[i]=true;
2839
}
2840
2841
typename vector<vector<Integer> >::const_iterator E;
2842
for(E=Deg1.begin();E!=Deg1.end();++E){
2843
size_t i;
2844
for(i=0;i<dim;++i)
2845
if(v_scalar_product(*E,Supp[i])==0 && Excluded[i])
2846
break;
2847
if(i<dim)
2848
continue;
2849
2850
for(i=0;i<dim;++i) // exclude original generators
2851
if(*E==Gens[i])
2852
break;
2853
if(i==dim){
2854
Results[0].Deg1_Elements.push_back(*E); // Results[0].Deg1_Elements.push_back(*E);
2855
Results[0].collected_elements_size++;
2856
}
2857
}
2858
Results[0].transfer_candidates();
2859
}
2860
2861
2862
//---------------------------------------------------------------------------
2863
2864
template<typename Integer>
void Full_Cone<Integer>::remove_duplicate_ori_gens_from_HB(){
    // Intended to remove duplicate original generators from the Hilbert basis
    // candidate list. Currently disabled by the early return below; the code
    // after it is deliberately kept (unreachable) until it is reactivated.

    return; //TODO reactivate!

    set<vector<Integer> > OriGens;
    typename list<Candidate<Integer> >:: iterator c=OldCandidates.Candidates.begin();
    typename set<vector<Integer> >:: iterator found;
    for(;c!=OldCandidates.Candidates.end();){
        if(!c->original_generator){
            ++c;
            continue;
        }
        found=OriGens.find(c->cand);
        if(found!=OriGens.end()){
            // duplicate of an original generator seen before: drop it
            c=OldCandidates.Candidates.erase(c);
        }
        else{
            if(c->original_generator)
                OriGens.insert(c->cand);
            ++c;
        }
    }
}
2888
2889
//---------------------------------------------------------------------------
2890
2891
template<typename Integer>
2892
void Full_Cone<Integer>::primal_algorithm(){
2893
2894
primal_algorithm_initialize();
2895
2896
/***** Main Work is done in build_top_cone() *****/
2897
build_top_cone(); // evaluates if keep_triangulation==false
2898
/***** Main Work is done in build_top_cone() *****/
2899
2900
check_pointed();
2901
if(!pointed){
2902
throw NonpointedException();
2903
}
2904
2905
primal_algorithm_finalize();
2906
primal_algorithm_set_computed();
2907
}
2908
2909
//---------------------------------------------------------------------------
2910
2911
template<typename Integer>
2912
void Full_Cone<Integer>::primal_algorithm_initialize() {
2913
2914
prepare_inclusion_exclusion();
2915
2916
SimplexEval = vector< SimplexEvaluator<Integer> >(omp_get_max_threads(),SimplexEvaluator<Integer>(*this));
2917
for(size_t i=0;i<SimplexEval.size();++i)
2918
SimplexEval[i].set_evaluator_tn(i);
2919
Results = vector< Collector<Integer> >(omp_get_max_threads(),Collector<Integer>(*this));
2920
Hilbert_Series.setVerbose(verbose);
2921
}
2922
2923
//---------------------------------------------------------------------------
2924
2925
template<typename Integer>
void Full_Cone<Integer>::primal_algorithm_finalize() {
    // Runs the remaining evaluations after the cone has been built and
    // accumulates the per-thread results (determinant sum, multiplicity,
    // Hilbert series), including data collected on offload devices.

    if (isComputed(ConeProperty::Grading) && !deg1_generated) {
        deg1_triangulation = false;
    }
    if (keep_triangulation) {
        is_Computed.set(ConeProperty::Triangulation);
    }
    if (do_cone_dec) {
        is_Computed.set(ConeProperty::ConeDecomposition);
    }

    evaluate_triangulation();
    assert(nrPyramids[0]==0);
    evaluate_large_simplices(); // can produce level 0 pyramids
    use_bottom_points=false; // block new attempts for subdivision
    evaluate_stored_pyramids(0); // in case subdivision took place
    evaluate_triangulation();
    FreeSimpl.clear();

    compute_class_group();

    // collect accumulated data from the SimplexEvaluators
    for (int zi=0; zi<omp_get_max_threads(); zi++) {
        detSum += Results[zi].getDetSum();
        multiplicity += Results[zi].getMultiplicitySum();
        if (do_h_vector) {
            Hilbert_Series += Results[zi].getHilbertSeriesSum();
        }
    }
#ifdef NMZ_MIC_OFFLOAD
    // collect accumulated data from mics
    if (_Offload_get_device_number() < 0) // dynamic check for being on CPU (-1)
    {
        mic_offloader.finalize();
    }
#endif // NMZ_MIC_OFFLOAD
    if (do_h_vector) {
        Hilbert_Series.collectData();
    }

    if(verbose) {
        verboseOutput() << "Total number of pyramids = "<< totalNrPyr << ", among them simplicial " << nrSimplicialPyr << endl;
        // cout << "Uni "<< Unimod << " Ht1NonUni " << Ht1NonUni << " NonDecided " << NonDecided << " TotNonDec " << NonDecidedHyp<< endl;
        if(do_only_multiplicity)
            verboseOutput() << "Determinants computed = " << TotDet << endl;
        /* if(NrCompVect>0)
            cout << "Vector comparisons " << NrCompVect << " Value comparisons " << NrCompVal
                    << " Average " << NrCompVal/NrCompVect+1 << endl; */
    }

    if(verbose && GMP_hyp+GMP_scal_prod+GMP_mat>0)
        verboseOutput() << "GMP transitions: matrices " << GMP_mat << " hyperplanes " << GMP_hyp << " vector operations " << GMP_scal_prod << endl;

}
2981
2982
//---------------------------------------------------------------------------
2983
2984
// Computes ModuleGeneratorsOverOriginalMonoid from the candidates collected
// during the primal algorithm. In the homogeneous case these are simply the
// new candidates (plus the zero vector); in the inhomogeneous case they are
// produced from the level 1 elements by reduction.
template<typename Integer>
void Full_Cone<Integer>::make_module_gens(){

    if(!inhomogeneous){
        NewCandidates.extract(ModuleGeneratorsOverOriginalMonoid);
        vector<Integer> Zero(dim,0);
        ModuleGeneratorsOverOriginalMonoid.push_front(Zero); // 0 is always a module generator here
        // cout << "Mod " << endl;
        // Matrix<Integer>(ModuleGeneratorsOverOriginalMonoid).pretty_print(cout);
        // cout << "--------" << endl;
        is_Computed.set(ConeProperty::ModuleGeneratorsOverOriginalMonoid,true);
        return;
    }

    // collect the original generators of level 1
    CandidateList<Integer> Level1OriGens;
    for(size_t i=0;i<nr_gen;++i){
        if(gen_levels[i]==1){
            Level1OriGens.push_back(Candidate<Integer>(Generators[i],*this));
        }
    }
    CandidateList<Integer> Level1Generators=Level1OriGens;
    Candidate<Integer> new_cand(dim,Support_Hyperplanes.nr_of_rows());
    typename list<Candidate<Integer> >::const_iterator lnew,l1;
    for(lnew=NewCandidates.Candidates.begin();lnew!=NewCandidates.Candidates.end();++lnew){

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        Integer level=v_scalar_product(lnew->cand,Truncation);
        if(level==1){
            // a level 1 candidate can be inserted directly
            new_cand=*lnew;
            Level1Generators.reduce_by_and_insert(new_cand,OldCandidates);
        }
        else{
            // otherwise combine the candidate with each level 1 original generator
            for(l1=Level1OriGens.Candidates.begin();l1!=Level1OriGens.Candidates.end();++l1){
                new_cand=sum(*l1,*lnew);
                Level1Generators.reduce_by_and_insert(new_cand,OldCandidates);
            }
        }
    }
    Level1Generators.extract(ModuleGeneratorsOverOriginalMonoid);
    ModuleGeneratorsOverOriginalMonoid.sort();
    ModuleGeneratorsOverOriginalMonoid.unique();
    is_Computed.set(ConeProperty::ModuleGeneratorsOverOriginalMonoid,true);

    for (size_t i = 0; i <nr_gen; i++) { // the level 1 input generators have not yet been inserted into OldCandidates
        if(gen_levels[i]==1) { // but they are needed for the truncated Hilbert basis computation
            NewCandidates.Candidates.push_back(Candidate<Integer>(Generators[i],*this));
            NewCandidates.Candidates.back().original_generator=true;
        }
    }
}
3035
3036
//---------------------------------------------------------------------------
3037
3038
// Computes the module generators over the original monoid and then merges
// the remaining candidates into OldCandidates so the (truncated) Hilbert
// basis can be extracted afterwards. Pure ordered call sequence.
template<typename Integer>
void Full_Cone<Integer>::make_module_gens_and_extract_HB(){

    make_module_gens();

    NewCandidates.divide_sortdeg_by2(); // was previously multiplied by 2
    NewCandidates.sort_by_deg();

    OldCandidates.merge(NewCandidates);
    OldCandidates.auto_reduce();
}
3049
3050
3051
//---------------------------------------------------------------------------
3052
3053
// Records which cone properties have been computed by the primal algorithm
// and performs the cheap post-processing steps: Hilbert basis extraction,
// deg 1 element collection from the generators, and the final shape of the
// Hilbert series (shift, HSOP denominator, simplification).
template<typename Integer>
void Full_Cone<Integer>::primal_algorithm_set_computed() {

    extreme_rays_and_deg1_check();
    if(!pointed){
        throw NonpointedException();
    }

    if (do_triangulation || do_partial_triangulation) {
        is_Computed.set(ConeProperty::TriangulationSize,true);
        if (do_evaluation) {
            is_Computed.set(ConeProperty::TriangulationDetSum,true);
        }
    }
    if (do_triangulation && do_evaluation && isComputed(ConeProperty::Grading))
        is_Computed.set(ConeProperty::Multiplicity,true);

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    if (do_Hilbert_basis) {
        if(do_module_gens_intcl){
            make_module_gens_and_extract_HB();
        }
        // the reduced candidates are the Hilbert basis
        OldCandidates.sort_by_val();
        OldCandidates.extract(Hilbert_Basis);
        OldCandidates.Candidates.clear();
        Hilbert_Basis.unique();
        is_Computed.set(ConeProperty::HilbertBasis,true);
        if (isComputed(ConeProperty::Grading)) {
            select_deg1_elements();
            check_deg1_hilbert_basis();
        }
    }

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    if (do_deg1_elements) {
        // generators of degree 1 are deg 1 elements themselves; in the
        // approximation case only those lying in the subcone are kept
        for(size_t i=0;i<nr_gen;i++)
            if(v_scalar_product(Grading,Generators[i])==1 && (!(is_approximation || is_global_approximation)
                            || subcone_contains(Generators[i])))
                Deg1_Elements.push_front(Generators[i]);
        is_Computed.set(ConeProperty::Deg1Elements,true);
        Deg1_Elements.sort();
        Deg1_Elements.unique();
    }

    INTERRUPT_COMPUTATION_BY_EXCEPTION

    if (do_h_vector) {
        Hilbert_Series.setShift(convertTo<long>(shift));
        Hilbert_Series.adjustShift();
        // now the shift in the HilbertSeries may change and we would have to adjust
        // the shift, the grading and more in the Full_Cone to continue to add data!
        // COMPUTE HSOP here
        if (do_hsop){
            compute_hsop();
            is_Computed.set(ConeProperty::HSOP);
        }
        Hilbert_Series.simplify();
        is_Computed.set(ConeProperty::HilbertSeries);
    }
    if(do_Stanley_dec){
        is_Computed.set(ConeProperty::StanleyDec);
    }

}
3119
3120
3121
//---------------------------------------------------------------------------
3122
// Normaliz modes (public)
3123
//---------------------------------------------------------------------------
3124
3125
// check the do_* bools, they must be set in advance
// this method (de)activates them according to dependencies between them;
// the order of the implications below is load-bearing: later lines read
// flags that earlier lines may have switched on.
// @param with_default  true on the second pass when default mode is active
template<typename Integer>
void Full_Cone<Integer>::do_vars_check(bool with_default) {

    do_extreme_rays=true; // we always want to do this if compute() is called

    /* if (do_default_mode && with_default) {
        do_Hilbert_basis = true;
        do_h_vector = true;
        if(!inhomogeneous)
            do_class_group=true;
    }
    */

    if (do_integrally_closed) {
        if (do_Hilbert_basis) {
            do_integrally_closed = false; // don't interrupt the computation
        } else {
            do_Hilbert_basis = true;
        }
    }

    // activate implications
    if (do_module_gens_intcl) do_Hilbert_basis= true;
    if (do_module_gens_intcl) use_bottom_points= false;
    //if (do_hsop) do_Hilbert_basis = true;
    if (do_Stanley_dec) keep_triangulation = true;
    if (do_cone_dec) keep_triangulation = true;
    if (keep_triangulation) do_determinants = true;
    if (do_multiplicity) do_determinants = true;
    if ((do_multiplicity || do_h_vector) && inhomogeneous) do_module_rank = true;
    if (do_determinants) do_triangulation = true;
    if (do_h_vector && (with_default || explicit_h_vector)) do_triangulation = true;
    if (do_deg1_elements) do_partial_triangulation = true;
    if (do_Hilbert_basis) do_partial_triangulation = true;
    // activate
    do_only_multiplicity = do_determinants;
    stop_after_cone_dec = true;
    if(do_cone_dec) do_only_multiplicity=false;

    // any goal that needs the full evaluation switches off the shortcuts
    if (do_Stanley_dec || do_h_vector || do_deg1_elements
                     || do_Hilbert_basis) {
        do_only_multiplicity = false;
        stop_after_cone_dec = false;
        do_evaluation = true;
    }
    if (do_determinants) do_evaluation = true;

    // deactivate
    if (do_triangulation) do_partial_triangulation = false;
    if (do_Hilbert_basis) do_deg1_elements = false; // they will be extracted later

}
3179
3180
3181
// general purpose compute method
// do_* bools must be set in advance, this method does sanity checks for it
// if no bool is set it does support hyperplanes and extreme rays
template<typename Integer>
void Full_Cone<Integer>::compute() {

    omp_start_level=omp_get_level();

    // trivial case: the zero cone needs special treatment
    if(dim==0){
        set_zero_cone();
        return;
    }


    do_vars_check(false);
    explicit_full_triang=do_triangulation; // to distinguish it from do_triangulation via default mode
    if(do_default_mode)
        do_vars_check(true);

    if(inhomogeneous){
        // nothing left to do in this combination of flags
        if(do_default_mode && !do_Hilbert_basis && !isComputed(ConeProperty::Grading) &&isComputed(ConeProperty::ExtremeRays))
            return;
    }

    start_message();

    // a pure support hyperplane computation requires full-dimensional input
    if(Support_Hyperplanes.nr_of_rows()==0 && !do_Hilbert_basis && !do_h_vector && !do_multiplicity && !do_deg1_elements
        && !do_Stanley_dec && !do_triangulation && !do_determinants)
        assert(Generators.max_rank_submatrix_lex().size() == dim);

    minimize_support_hyperplanes(); // if they are given
    if (inhomogeneous)
        set_levels();

    check_given_grading();

    if ((!do_triangulation && !do_partial_triangulation)
            || (Grading.size()>0 && !isComputed(ConeProperty::Grading))){
            // in the second case there are only two possibilities:
            // either nonpointed or bad grading
        do_triangulation=false;
        do_partial_triangulation=false;
        support_hyperplanes();
    }
    else{
        // look for a grading if it is needed
        find_grading();
        if(isComputed(ConeProperty::IsPointed) && !pointed){
            end_message();
            return;
        }

        if (!isComputed(ConeProperty::Grading))
            disable_grading_dep_comp();

        // the polyhedron is a polytope exactly if no generator has level 0
        bool polyhedron_is_polytope=inhomogeneous;
        if(inhomogeneous){
            find_level0_dim();
            for(size_t i=0;i<nr_gen;++i)
                if(gen_levels[i]==0){
                    polyhedron_is_polytope=false;
                    break;
                }
        }

        set_degrees();
        sort_gens_by_degree(true);

        if(do_approximation && !deg1_generated){
            if(!isComputed(ConeProperty::ExtremeRays) || !isComputed(ConeProperty::SupportHyperplanes)){
                do_extreme_rays=true;
                dualize_cone(false);// no start or end message
            }
            if(verbose)
                verboseOutput() << "Approximating rational by lattice polytope" << endl;
            if(do_deg1_elements){
                compute_deg1_elements_via_approx_global();
                is_Computed.set(ConeProperty::Deg1Elements,true);
                if(do_triangulation){
                    do_deg1_elements=false;
                    do_partial_triangulation=false;
                    do_only_multiplicity = do_determinants;
                    primal_algorithm();
                }
            } else { // now we want subdividing elements for a simplicial cone
                assert(do_Hilbert_basis);
                compute_elements_via_approx(Hilbert_Basis);
            }

        }
        else{
            if(polyhedron_is_polytope && (do_Hilbert_basis || do_h_vector || do_multiplicity)){ // in this situation we must find the
                convert_polyhedron_to_polytope();                             // lattice points in a polytope
            }
            else{
                if(do_partial_triangulation || do_triangulation)
                    primal_algorithm();
                else
                    return; // NOTE(review): returns without end_message() — matches the original control flow
            }
        }

        if(inhomogeneous){
            find_module_rank();
            // cout << "module rank " << module_rank << endl;
        }

    }

    end_message();
}
3292
3293
// Computes the degrees of a homogeneous system of parameters (HSOP) and
// stores the corresponding denominator in the Hilbert series. The heights
// are computed recursively via heights(); in the inhomogeneous case the
// computation is restricted to the recession cone.
template<typename Integer>
void Full_Cone<Integer>::compute_hsop(){
    vector<long> hsop_deg(dim,1);
    // if all extreme rays are in degree one, there is nothing to compute
    if (!isDeg1ExtremeRays()){
        if(verbose){
            verboseOutput() << "Computing heights ... " << flush;
        }

        // keep only extreme rays of the tail cone in the inhomogeneous case
        vector<bool> choice = Extreme_Rays_Ind;
        if (inhomogeneous){
            for (size_t i=0; i<Generators.nr_of_rows(); i++) {
                if (Extreme_Rays_Ind[i] && v_scalar_product(Generators[i],Truncation) != 0) {
                    choice[i]=false;
                }
            }
        }
        Matrix<Integer> ER = Generators.submatrix(choice);
        Matrix<Integer> SH = getSupportHyperplanes();
        if (inhomogeneous){
            // recompute the support hyperplanes for the recession cone
            Sublattice_Representation<Integer> recession_lattice(ER,true);
            Matrix<Integer> SH_raw = recession_lattice.to_sublattice_dual(SH);
            Matrix<Integer> ER_embedded = recession_lattice.to_sublattice(ER);
            Full_Cone<Integer> recession_cone(ER_embedded);
            recession_cone.Support_Hyperplanes = SH_raw;
            recession_cone.dualize_cone();
            SH = recession_lattice.from_sublattice_dual(recession_cone.getSupportHyperplanes());
        }
        vector<size_t> ideal_heights(ER.nr_of_rows(),1);
        // the heights vector is clear in the simplicial case
        if (is_simplicial){
            for (size_t j=0;j<ideal_heights.size();j++) ideal_heights[j]=j+1;
        } else {
            // build the facet / extreme-ray incidence data for the recursion
            list<pair<boost::dynamic_bitset<> , size_t>> facet_list;
            list<vector<key_t>> facet_keys;
            vector<key_t> key;
            size_t d = dim;
            if (inhomogeneous) d = level0_dim;
            for (size_t i=SH.nr_of_rows();i-->0;){
                boost::dynamic_bitset<> new_facet(ER.nr_of_rows());
                key.clear();
                for (size_t j=0;j<ER.nr_of_rows();j++){
                    if (v_scalar_product(SH[i],ER[j])==0){
                        new_facet[new_facet.size()-1-j]=1;
                    } else {
                        key.push_back(j);
                    }
                }
                facet_list.push_back(make_pair(new_facet,d-1));
                facet_keys.push_back(key);
            }
            facet_list.sort(); // should be sorted lex
            //~ cout << "FACETS:" << endl;
            //~ //cout << "size: " << facet_list.size() << " | " << facet_list << endl;
            //~ for (auto jt=facet_list.begin();jt!=facet_list.end();++jt){
                //~ cout << jt->first << " | " << jt->second << endl;
            //~ }
            //cout << "facet non_keys: " << facet_keys << endl;
            heights(facet_keys,facet_list,ER.nr_of_rows()-1,ideal_heights,d-1);
        }
        if(verbose){
            verboseOutput() << "done." << endl;
            // NOTE(review): this assert only runs in verbose mode
            assert(ideal_heights[ER.nr_of_rows()-1]==dim);
            verboseOutput() << "Heights vector: " << ideal_heights << endl;
        }
        vector<Integer> er_deg = ER.MxV(Grading);
        hsop_deg = convertTo<vector<long> >(degrees_hsop(er_deg,ideal_heights));
    }
    if(verbose){
        verboseOutput() << "Degrees of HSOP: " << hsop_deg << endl;
    }
    Hilbert_Series.setHSOPDenom(hsop_deg);
}
3366
3367
3368
3369
// recursive method to compute the heights
// TODO: at the moment: facets are a parameter. global would be better
//
// @param facet_keys     per facet, the sorted list of extreme rays NOT on it
// @param faces          faces (as bitsets, with a dimension cache in .second)
//                       of the sub-ideal treated at this recursion level
// @param index          bit position of the current extreme ray; counted
//                       backwards, so ray number = ideal_heights.size()-index-1
// @param ideal_heights  output vector of heights, filled one entry per level
// @param max_dim        maximal possible dimension of a face at this level
template<typename Integer>
void Full_Cone<Integer>::heights(list<vector<key_t>>& facet_keys,list<pair<boost::dynamic_bitset<>,size_t>> faces, size_t index,vector<size_t>& ideal_heights,size_t max_dim){
    // since we count the index backwards, this is the actual nr of the extreme ray
    size_t ER_nr = ideal_heights.size()-index-1;
    //~ cout << "starting calculation for extreme ray nr " << ER_nr << endl;
    list<pair<boost::dynamic_bitset<>,size_t>> not_faces;
    auto face_it=faces.begin();
    // faces is sorted lex, so the faces not containing bit `index` come first
    for (;face_it!=faces.end();++face_it){
        if (face_it->first.test(index)){ // check whether index is set
            break;
        }
        // resize not_faces
        face_it->first.resize(index);
    }
    not_faces.splice(not_faces.begin(),faces,faces.begin(),face_it);
    //~ cout << "faces not containing it:" << endl;
    //~ for (auto jt=not_faces.begin();jt!=not_faces.end();++jt){
        //~ cout << jt->first << " | " << jt->second << endl;
    //~ }
    //~ cout << "faces containing it:" << endl;
    //~ for (auto jt=faces.begin();jt!=faces.end();++jt){
        //~ cout << jt->first << " | " << jt->second << endl;
    //~ }
    auto not_faces_it=not_faces.begin();
    // update the heights
    if (ER_nr>0){
        if (!not_faces.empty()){
            ideal_heights[ER_nr] = ideal_heights[ER_nr-1];
            // compute the dimensions of not_faces
            vector<bool> choice = Extreme_Rays_Ind;
            if (inhomogeneous){
                for (size_t i=0; i<Generators.nr_of_rows(); i++) {
                    if (Extreme_Rays_Ind[i] && v_scalar_product(Generators[i],Truncation) != 0) {
                        choice[i]=false;
                    }
                }
            }
            Matrix<Integer> ER = Generators.submatrix(choice);
            // pick the rank-test workspace of the current thread
            int tn;
            if(omp_get_level()==omp_start_level)
                tn=0;
            else tn = omp_get_ancestor_thread_num(omp_start_level+1);
            Matrix<Integer>& Test = Top_Cone->RankTest[tn];
            vector<key_t> face_key;
            for (;not_faces_it!=not_faces.end();++not_faces_it){
                if (not_faces_it->second==0){ // dimension has not yet been computed
                    // generate the key vector
                    face_key.resize(0);
                    for (size_t i=0;i<not_faces_it->first.size();++i){
                        if (not_faces_it->first.test(i)){
                            face_key.push_back(ER.nr_of_rows()-1-i);
                        }
                    }
                    not_faces_it->second = Test.rank_submatrix(ER,face_key);
                }
                if (not_faces_it->second==max_dim) break;
            }
            // no face of maximal dimension found: height increases
            if (not_faces_it==not_faces.end()) {
                --max_dim;
                ideal_heights[ER_nr] = ideal_heights[ER_nr-1]+1;
            }
        } else {
            ideal_heights[ER_nr] = ideal_heights[ER_nr-1]+1;
            --max_dim;
        }
    }
    // we computed all the heights
    if (index==0) return;
    // if inner point, we can skip now

    // take the union of all faces not containing the current extreme ray
    boost::dynamic_bitset<> union_faces(index);
    not_faces_it = not_faces.begin();
    for (;not_faces_it!=not_faces.end();++not_faces_it){
        union_faces |= not_faces_it->first; // take the union
    }
    //cout << "Their union: " << union_faces << endl;
    // the not_faces now already have a size one smaller
    union_faces.resize(index+1);
    list<pair<boost::dynamic_bitset<>,size_t>> new_faces;
    // delete all facets which only consist of the previous extreme rays
    auto facet_it=facet_keys.begin();
    size_t counter=0;
    while(facet_it!=facet_keys.end()){

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        counter=0;
        for (size_t i=0;i<facet_it->size();i++){
            if (facet_it->at(i)<=ER_nr) continue;
            // now we only have new extreme rays
            counter = i;
            break;
        }
        size_t j=ER_nr+1;
        for (;j<ideal_heights.size();j++){
            if (facet_it->at(counter)!=j){ // facet contains the element j
                break;
            } else if (counter < facet_it->size()-1) counter++;
        }
        if (j==ideal_heights.size()){
            facet_it = facet_keys.erase(facet_it);
        } else ++facet_it;
    }
    facet_it=facet_keys.begin();

    // main loop
    for (;facet_it!=facet_keys.end();++facet_it){

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        // check whether the facet is contained in the faces not containing the generator
        // and the previous generators
        // and check whether the generator is in the facet
        // check whether intersection with facet contributes
        bool not_containing_el =false;
        // bool whether the facet contains an element which is NOT in the faces not containing the current extreme ray
        bool containing_critical_el=false;
        counter=0;
        //cout << "check critical for facet " << *it << endl;
        for (size_t i=0;i<facet_it->size();i++){
            if (facet_it->at(i)==ER_nr){
                not_containing_el = true;
            }
            if (facet_it->at(i)<=ER_nr && i<facet_it->size()-1) continue;
            counter=i; // now we have elements which are bigger than the current extreme ray
            if (not_containing_el){
                for (size_t j=ER_nr+1;j<ideal_heights.size();j++){
                    if (facet_it->at(counter)!=j){ // i.e. j is in the facet
                        if (!union_faces.test(ideal_heights.size()-1-j)){
                            containing_critical_el = true;
                            break;
                        }
                    } else if (counter<facet_it->size()-1) counter++;
                }
            }
            break;
        }
        if(not_containing_el && containing_critical_el){ //facet contributes
            //cout << "Taking intersections with the facet " << *facet_it << endl;
            face_it =faces.begin();
            for (;face_it!=faces.end();++face_it){
                boost::dynamic_bitset<> intersection(face_it->first);
                for (size_t i=0;i<facet_it->size();i++){
                    if (facet_it->at(i)>ER_nr) intersection.set(ideal_heights.size()-1-facet_it->at(i),false);
                }
                intersection.resize(index);
                if (intersection.any()){
                    // check whether the intersection lies in any of the not_faces
                    not_faces_it = not_faces.begin();
                    for (;not_faces_it!=not_faces.end();++not_faces_it){
                        if (intersection.is_subset_of(not_faces_it->first)) break;
                    }
                    if (not_faces_it== not_faces.end()) new_faces.push_back(make_pair(intersection,0));
                }
            }
        }
    }
    // the new faces need to be sort in lex order anyway. this can be used to reduce operations
    // for subset checking
    new_faces.sort();
    auto outer_it = new_faces.begin();
    auto inner_it = new_faces.begin();
    for (;outer_it!=new_faces.end();++outer_it){

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        // work with a not-key vector
        vector<key_t> face_not_key;
        for (size_t i=0;i<outer_it->first.size();i++){
            if (!outer_it->first.test(i)){
                face_not_key.push_back(i);
            }
        }
        inner_it=new_faces.begin();
        size_t i=0;
        while (inner_it!=outer_it){
            i=0;
            for (;i<face_not_key.size();++i){
                if (inner_it->first.test(face_not_key[i])) break; //inner_it has an element which is not in outer_it
            }
            if (i==face_not_key.size()){
                inner_it = new_faces.erase(inner_it); //inner_it is a subface of outer_it
            } else ++inner_it;
        }
    }
    new_faces.merge(not_faces);
    /*cout << "The new faces: " << endl;
    for (auto jt=new_faces.begin();jt!=new_faces.end();++jt){
        cout << jt->first << " | " << jt->second << endl;
    }*/

    // recurse on the next extreme ray
    heights(facet_keys,new_faces,index-1,ideal_heights,max_dim);
}
3565
3566
3567
3568
// Treats a polyhedron without level 0 generators as a polytope: a helper
// Full_Cone graded by the truncation computes the lattice points as its
// deg 1 elements, and the results (support hyperplanes, extreme rays,
// Hilbert basis, module rank, Hilbert series) are copied back.
template<typename Integer>
void Full_Cone<Integer>::convert_polyhedron_to_polytope() {

    if(verbose){
        verboseOutput() << "Converting polyhedron to polytope" << endl;
        verboseOutput() << "Pointed since cone over polytope" << endl;
    }
    pointed=true;
    is_Computed.set(ConeProperty::IsPointed);
    // set up the polytope cone, transferring what is already known
    Full_Cone Polytope(Generators);
    Polytope.pointed=true;
    Polytope.is_Computed.set(ConeProperty::IsPointed);
    Polytope.keep_order=true;
    Polytope.Grading=Truncation; // the truncation grades the polytope
    Polytope.is_Computed.set(ConeProperty::Grading);
    if(isComputed(ConeProperty::SupportHyperplanes)){
        Polytope.Support_Hyperplanes=Support_Hyperplanes;
        Polytope.nrSupport_Hyperplanes=nrSupport_Hyperplanes;
        Polytope.is_Computed.set(ConeProperty::SupportHyperplanes);
    }
    if(isComputed(ConeProperty::ExtremeRays)){
        Polytope.Extreme_Rays_Ind=Extreme_Rays_Ind;
        Polytope.is_Computed.set(ConeProperty::ExtremeRays);
    }
    Polytope.do_deg1_elements=true;
    Polytope.verbose=verbose;
    Polytope.compute();

    // copy back what the polytope computation produced
    if(Polytope.isComputed(ConeProperty::SupportHyperplanes) &&
                    !isComputed(ConeProperty::SupportHyperplanes)){
        Support_Hyperplanes=Polytope.Support_Hyperplanes;
        nrSupport_Hyperplanes=Polytope.nrSupport_Hyperplanes;
        is_Computed.set(ConeProperty::SupportHyperplanes);
    }
    if(Polytope.isComputed(ConeProperty::ExtremeRays) &&
                    !isComputed(ConeProperty::ExtremeRays)){
        Extreme_Rays_Ind=Polytope.Extreme_Rays_Ind;
        is_Computed.set(ConeProperty::ExtremeRays);
    }
    if(Polytope.isComputed(ConeProperty::Deg1Elements)){
        // the lattice points of the polytope are the module generators
        module_rank=Polytope.Deg1_Elements.size();
        if(do_Hilbert_basis){
            Hilbert_Basis=Polytope.Deg1_Elements;
            is_Computed.set(ConeProperty::HilbertBasis);
        }
        is_Computed.set(ConeProperty::ModuleRank);
        if(isComputed(ConeProperty::Grading) && module_rank>0){
            multiplicity=1; // of the recession cone;
            is_Computed.set(ConeProperty::Multiplicity);
            if(do_h_vector){
                // build the h-vector by counting lattice points per degree
                vector<num_t> hv(1);
                typename list<vector<Integer> >::const_iterator hb=Polytope.Deg1_Elements.begin();
                for(;hb!=Polytope.Deg1_Elements.end();++hb){
                    size_t deg = convertTo<long>(v_scalar_product(Grading,*hb));
                    if(deg+1>hv.size())
                        hv.resize(deg+1);
                    hv[deg]++;
                }
                Hilbert_Series.add(hv,vector<denom_t>());
                Hilbert_Series.setShift(convertTo<long>(shift));
                Hilbert_Series.adjustShift();
                Hilbert_Series.simplify();
                is_Computed.set(ConeProperty::HilbertSeries);
            }
        }
    }
}
3634
3635
//---------------------------------------------------------------------------
3636
3637
template<typename Integer>
3638
void Full_Cone<Integer>::compute_deg1_elements_via_approx_global() {
3639
3640
compute_elements_via_approx(Deg1_Elements);
3641
3642
/* typename list<vector<Integer> >::iterator e; // now already done in simplex.cpp and directly for generators
3643
for(e=Deg1_Elements.begin(); e!=Deg1_Elements.end();)
3644
if(!contains(*e))
3645
e=Deg1_Elements.erase(e);
3646
else
3647
++e; */
3648
if(verbose)
3649
verboseOutput() << Deg1_Elements.size() << " deg 1 elements found" << endl;
3650
}
3651
3652
//---------------------------------------------------------------------------
3653
3654
// Computes elements (deg 1 elements and/or Hilbert basis) by building an
// approximating cone C_approx generated by lattice points (from latt_approx()),
// computing in C_approx, and splicing its results back into the original cone.
// @param elements_from_approx  must be empty on entry; receives the elements
template<typename Integer>
void Full_Cone<Integer>::compute_elements_via_approx(list<vector<Integer> >& elements_from_approx) {

    if (!isComputed(ConeProperty::Grading)){
        support_hyperplanes(); // the only thing we can do now
        return;
    }
    assert(elements_from_approx.empty());
    vector<list<vector<Integer>>> approx_points = latt_approx();
    vector<vector<key_t>> approx_points_indices;
    key_t current_key =0;
    //cout << "Approximation points: " << endl;
    //for (size_t j=0;j<dim;++j){
        ////cout << "Original generator " << j << ": " << Generators[j] << endl;
        ////cout << approx_points[j] << endl;
    //}
    //cout << "Nr of ER: " << getExtremeRays().size() << endl;
    //Matrix<Integer> all_approx_points(Generators);
    // collect all approximation points and remember, per original generator,
    // the keys of its approximation points
    Matrix<Integer> all_approx_points(0,dim);
    for (size_t i=0;i<nr_gen;i++){
        vector<key_t> indices(approx_points[i].size());
        if (!approx_points[i].empty()){

            all_approx_points.append(approx_points[i]);
            for (size_t j=0;j<approx_points[i].size();++j){
                indices[j]=current_key;
                current_key++;
            }

        }
        approx_points_indices.push_back(indices);
    }
    if(verbose){
        verboseOutput() << "Nr original generators: " << nr_gen << endl;
        verboseOutput() << "Nr approximation points: " << all_approx_points.nr_of_rows() << endl;
    }
    Full_Cone C_approx(all_approx_points);
    C_approx.OriginalGenerators = Generators;
    C_approx.approx_points_keys = approx_points_indices;
    C_approx.verbose = verbose;
    //C_temp.build_cone_approx(*this,approx_points_indices);


    //Full_Cone C_approx(C_temp.getGenerators());
    //Full_Cone C_approx(all_approx_points); // latt_approx computes a matrix of generators

    // configure the approximating cone
    C_approx.is_approximation=true;
    // C_approx.Generators.pretty_print(cout);
    C_approx.do_deg1_elements=do_deg1_elements; // for supercone C_approx that is generated in degree 1
    C_approx.do_Hilbert_basis=do_Hilbert_basis;
    C_approx.do_all_hyperplanes=false; // not needed
    C_approx.Subcone_Support_Hyperplanes=Support_Hyperplanes; // *this is a subcone of C_approx, used to discard elements
    C_approx.Support_Hyperplanes=Support_Hyperplanes; // UNFORTUNATELY NEEDED IN REDUCTION FOR SUBDIVISION BY APPROX
    C_approx.is_Computed.set(ConeProperty::SupportHyperplanes);
    C_approx.nrSupport_Hyperplanes = nrSupport_Hyperplanes;
    C_approx.Grading = Grading;
    C_approx.is_Computed.set(ConeProperty::Grading);
    C_approx.Truncation=Truncation;
    C_approx.TruncLevel=TruncLevel;

    //if(verbose)
        //verboseOutput() << "Computing elements in approximating cone with "
                        //<< C_approx.Generators.nr_of_rows() << " generators." << endl;
    if(verbose){
        verboseOutput() << "Computing elements in approximating cone." << endl;
    }

    // temporarily silence verbose output while the helper cone computes
    bool verbose_tmp = verbose;
    verbose =false;
    C_approx.compute();
    verbose = verbose_tmp;

    //vector<key_t> used_gens;
    //for (size_t j=0;j<nr_gen;++j){
        //if (C_approx.in_triang[j]) used_gens.push_back(j);
    //}
    //C_approx.Generators = C_approx.Generators.submatrix(used_gens);
    //if (verbose){
        //verboseOutput() << "Used "<< C_approx.Generators.nr_of_rows() << " / " << C_approx.nr_gen << " generators." << endl;
    //}
    //C_approx.nr_gen=C_approx.Generators.nr_of_rows();


    // TODO: with the current implementation, this is always the case!
    if(!C_approx.contains(*this) || Grading!=C_approx.Grading){
        throw FatalException("Wrong approximating cone.");
    }

    if(verbose)
        verboseOutput() << "Sum of dets of simplicial cones evaluated in approximation = " << C_approx.detSum << endl;

    if(verbose)
        verboseOutput() << "Returning to original cone" << endl;
    // move (splice) the computed elements into the caller's list
    if(do_deg1_elements)
        elements_from_approx.splice(elements_from_approx.begin(),C_approx.Deg1_Elements);
    if(do_Hilbert_basis)
        elements_from_approx.splice(elements_from_approx.begin(),C_approx.Hilbert_Basis);
}
3752
3753
3754
// -s
3755
template<typename Integer>
3756
void Full_Cone<Integer>::support_hyperplanes() {
3757
if(!isComputed(ConeProperty::SupportHyperplanes)){
3758
sort_gens_by_degree(false); // we do not want to triangulate here
3759
build_top_cone();
3760
}
3761
extreme_rays_and_deg1_check();
3762
if(inhomogeneous){
3763
find_level0_dim();
3764
if(do_module_rank)
3765
find_module_rank();
3766
}
3767
compute_class_group();
3768
}
3769
3770
//---------------------------------------------------------------------------
3771
// Checks and auxiliary algorithms
3772
//---------------------------------------------------------------------------
3773
3774
template<typename Integer>
3775
void Full_Cone<Integer>::extreme_rays_and_deg1_check() {
3776
check_pointed();
3777
if(!pointed){
3778
throw NonpointedException();
3779
}
3780
//cout << "Generators" << endl;
3781
//Generators.pretty_print(cout);
3782
//cout << "SupportHyperplanes" << endl;
3783
//Support_Hyperplanes.pretty_print(cout);
3784
compute_extreme_rays();
3785
deg1_check();
3786
}
3787
3788
//---------------------------------------------------------------------------
3789
3790
template<typename Integer>
3791
void Full_Cone<Integer>::check_given_grading(){
3792
3793
if(Grading.size()==0)
3794
return;
3795
3796
bool positively_graded=true;
3797
3798
if(!isComputed(ConeProperty::Grading)){
3799
size_t neg_index=0;
3800
Integer neg_value;
3801
bool nonnegative=true;
3802
vector<Integer> degrees = Generators.MxV(Grading);
3803
for (size_t i=0; i<degrees.size(); ++i) {
3804
if (degrees[i]<=0 && (!inhomogeneous || gen_levels[i]==0)) {
3805
// in the inhomogeneous case: test only generators of tail cone
3806
positively_graded=false;;
3807
if(degrees[i]<0){
3808
nonnegative=false;
3809
neg_index=i;
3810
neg_value=degrees[i];
3811
}
3812
}
3813
}
3814
3815
if(!nonnegative){
3816
throw BadInputException("Grading gives negative value "
3817
+ toString(neg_value) + " for generator "
3818
+ toString(neg_index+1) + "!");
3819
}
3820
}
3821
3822
if(positively_graded){
3823
is_Computed.set(ConeProperty::Grading);
3824
if(inhomogeneous)
3825
find_grading_inhom();
3826
set_degrees();
3827
}
3828
}
3829
3830
//---------------------------------------------------------------------------
3831
3832
template<typename Integer>
void Full_Cone<Integer>::find_grading(){
// Tries to find an implicit grading under which all generators (or at least
// all extreme rays) have degree 1. Only used in the homogeneous case.

    if(inhomogeneous) // in the inhomogeneous case we do not allow implicit grading
        return;

    deg1_check(); // trying to find grading under which all generators have the same degree
    if (!isComputed(ConeProperty::Grading) && (do_multiplicity || do_deg1_elements || do_h_vector)) {
        if (!isComputed(ConeProperty::ExtremeRays)) {
            if (verbose) {
                verboseOutput() << "Cannot find grading s.t. all generators have the degree 1! Computing Extreme rays first:" << endl;
            }
            // compute support hyperplanes on a copy, then retry the degree-1
            // check restricted to the extreme rays
            get_supphyps_from_copy(true);
            extreme_rays_and_deg1_check();
            if(!pointed){
                throw NonpointedException();
            };

            // We keep the SupportHyperplanes, so we do not need to recompute them
            // for the last generator, and use them to make a global reduction earlier
        }
    }
}
3855
3856
//---------------------------------------------------------------------------
3857
3858
template<typename Integer>
3859
void Full_Cone<Integer>::find_level0_dim(){
3860
3861
if(!isComputed(ConeProperty::Generators)){
3862
throw FatalException("Missing Generators.");
3863
}
3864
3865
Matrix<Integer> Help(nr_gen,dim);
3866
for(size_t i=0; i<nr_gen;++i)
3867
if(gen_levels[i]==0)
3868
Help[i]=Generators[i];
3869
3870
ProjToLevel0Quot=Help.kernel();
3871
3872
level0_dim=dim-ProjToLevel0Quot.nr_of_rows();
3873
is_Computed.set(ConeProperty::RecessionRank);
3874
}
3875
3876
//---------------------------------------------------------------------------
3877
3878
template<typename Integer>
3879
void Full_Cone<Integer>::find_module_rank(){
3880
3881
if(isComputed(ConeProperty::ModuleRank))
3882
return;
3883
3884
if(level0_dim==dim){
3885
module_rank=0;
3886
is_Computed.set(ConeProperty::ModuleRank);
3887
return;
3888
}
3889
if(isComputed(ConeProperty::HilbertBasis)){
3890
find_module_rank_from_HB();
3891
return;
3892
}
3893
3894
// size_t HBrank = module_rank;
3895
3896
if(do_module_rank)
3897
find_module_rank_from_proj();
3898
3899
/* if(isComputed(ConeProperty::HilbertBasis))
3900
assert(HBrank==module_rank);
3901
*/
3902
3903
}
3904
3905
//---------------------------------------------------------------------------
3906
3907
template<typename Integer>
void Full_Cone<Integer>::find_module_rank_from_proj(){
// Computes the module rank as the number of degree-1 elements of the image
// of this cone in the quotient modulo the level-0 sublattice.

    if(verbose){
        verboseOutput() << "Computing projection to quotient mod level 0" << endl;
    }

    // project every generator into the quotient lattice
    Matrix<Integer> ProjGen(nr_gen,dim-level0_dim);
    for(size_t i=0;i<nr_gen;++i){
        ProjGen[i]=ProjToLevel0Quot.MxV(Generators[i]);
    }

    // grading on the quotient induced by the truncation
    vector<Integer> GradingProj=ProjToLevel0Quot.transpose().solve_ZZ(Truncation);

    // auxiliary cone over the projected generators, graded by GradingProj
    Full_Cone<Integer> Cproj(ProjGen);
    Cproj.verbose=false;
    Cproj.Grading=GradingProj;
    Cproj.is_Computed.set(ConeProperty::Grading);
    Cproj.do_deg1_elements=true;
    Cproj.compute();

    // the module rank is the number of degree-1 elements of the projection
    module_rank=Cproj.Deg1_Elements.size();
    is_Computed.set(ConeProperty::ModuleRank);
    return;
}
3932
3933
//---------------------------------------------------------------------------
3934
3935
template<typename Integer>
3936
void Full_Cone<Integer>::find_module_rank_from_HB(){
3937
3938
if(level0_dim==0){
3939
module_rank=Hilbert_Basis.size();
3940
is_Computed.set(ConeProperty::ModuleRank);
3941
return;
3942
}
3943
3944
3945
set<vector<Integer> > Quotient;
3946
vector<Integer> v;
3947
3948
// cout << "=======================" << endl;
3949
// ProjToLevel0Quot.print(cout);
3950
// cout << "=======================" << endl;
3951
3952
typename list<vector<Integer> >::iterator h;
3953
3954
for(h=Hilbert_Basis.begin();h!=Hilbert_Basis.end();++h){
3955
3956
INTERRUPT_COMPUTATION_BY_EXCEPTION
3957
3958
v=ProjToLevel0Quot.MxV(*h);
3959
bool zero=true;
3960
for(size_t j=0;j<v.size();++j)
3961
if(v[j]!=0){
3962
zero=false;
3963
break;
3964
}
3965
if(!zero)
3966
Quotient.insert(v);
3967
}
3968
3969
module_rank=Quotient.size();
3970
is_Computed.set(ConeProperty::ModuleRank);
3971
3972
}
3973
3974
//---------------------------------------------------------------------------
3975
3976
template<typename Integer>
void Full_Cone<Integer>::find_grading_inhom(){
// In the inhomogeneous case, replaces Grading by Grading - shift*Truncation,
// where shift is chosen minimal so that all generators of positive level
// get strictly positive degree. Stores the shift in the member "shift".

    if(Grading.size()==0 || Truncation.size()==0){
        throw FatalException("Cannot find grading in the inhomogeneous case!");
    }

    if(shift!=0) // to avoid double computation
        return;

    // For every generator of positive level compute the largest integer quot
    // with level*quot < degree, and take the minimum over all generators.
    bool first=true;
    Integer level,degree,quot=0,min_quot=0;
    for(size_t i=0;i<nr_gen;++i){
        level=v_scalar_product(Truncation,Generators[i]);
        if(level==0)
            continue; // generators of the tail cone do not constrain the shift
        degree=v_scalar_product(Grading,Generators[i]);
        quot=degree/level;
        // integer division may round towards zero; force level*quot < degree strictly
        if(level*quot>=degree)
            quot--;
        if(first){
            min_quot=quot;
            first=false;
        }
        if(quot<min_quot)
            min_quot=quot;
    }
    shift = min_quot;
    for(size_t i=0;i<dim;++i) // under this grading all generators have positive degree
        Grading[i] = Grading[i] - shift * Truncation[i];

    // shift--; // NO LONGER correction for the Hilbert series computation to have it start in degree 0
}
4012
4013
//---------------------------------------------------------------------------
4014
4015
template<typename Integer>
4016
void Full_Cone<Integer>::set_degrees() {
4017
4018
// Generators.pretty_print(cout);
4019
// cout << "Grading " << Grading;
4020
if (gen_degrees.size() != nr_gen && isComputed(ConeProperty::Grading)) // now we set the degrees
4021
{
4022
gen_degrees.resize(nr_gen);
4023
vector<Integer> gen_degrees_Integer=Generators.MxV(Grading);
4024
for (size_t i=0; i<nr_gen; i++) {
4025
if (gen_degrees_Integer[i] < 1) {
4026
throw BadInputException("Grading gives non-positive value "
4027
+ toString(gen_degrees_Integer[i])
4028
+ " for generator " + toString(i+1) + ".");
4029
}
4030
convert(gen_degrees[i], gen_degrees_Integer[i]);
4031
}
4032
}
4033
4034
}
4035
4036
//---------------------------------------------------------------------------
4037
4038
template<typename Integer>
4039
void Full_Cone<Integer>::set_levels() {
4040
if(inhomogeneous && Truncation.size()!=dim){
4041
throw FatalException("Truncation not defined in inhomogeneous case.");
4042
}
4043
4044
// cout <<"trunc " << Truncation;
4045
4046
if(gen_levels.size()!=nr_gen) // now we compute the levels
4047
{
4048
gen_levels.resize(nr_gen);
4049
vector<Integer> gen_levels_Integer=Generators.MxV(Truncation);
4050
for (size_t i=0; i<nr_gen; i++) {
4051
if (gen_levels_Integer[i] < 0) {
4052
throw FatalException("Truncation gives non-positive value "
4053
+ toString(gen_levels_Integer[i]) + " for generator "
4054
+ toString(i+1) + ".");
4055
}
4056
convert(gen_levels[i], gen_levels_Integer[i]);
4057
// cout << "Gen " << Generators[i];
4058
// cout << "level " << gen_levels[i] << endl << "----------------------" << endl;
4059
}
4060
}
4061
4062
}
4063
4064
//---------------------------------------------------------------------------
4065
4066
template<typename Integer>
void Full_Cone<Integer>::sort_gens_by_degree(bool triangulate) {
// Sorts the generators (by degree / 1-norm if triangulate, always
// lexicographically as tie-break) and applies the same permutation to all
// parallel per-generator data. If triangulate, also measures the "roughness"
// of the degrees and may activate bottom decomposition.

    if(keep_order)
        return;

    // weight vectors for the sort: grading if known, otherwise the 1-norm
    // (all-ones vector with absolute values)
    Matrix<Integer> Weights(0,dim);
    vector<bool> absolute;
    if(triangulate){
        if(isComputed(ConeProperty::Grading)){
            Weights.append(Grading);
            absolute.push_back(false);
        }
        else{
            Weights.append(vector<Integer>(dim,1));
            absolute.push_back(true);
        }
    }

    // apply the same permutation to every array indexed by generators
    vector<key_t> perm=Generators.perm_by_weights(Weights,absolute);
    Generators.order_rows_by_perm(perm);
    order_by_perm(Extreme_Rays_Ind,perm);
    if(isComputed(ConeProperty::Grading))
        order_by_perm(gen_degrees,perm);
    if(inhomogeneous && gen_levels.size()==nr_gen)
        order_by_perm(gen_levels,perm);
    compose_perm_gens(perm);

    if(triangulate){
        // roughness = ratio of the largest to the smallest weight after
        // sorting (degrees, or 1-norms if there is no grading)
        Integer roughness;
        if(isComputed(ConeProperty::Grading)){
            roughness=gen_degrees[nr_gen-1]/gen_degrees[0];
        }
        else{
            Integer max_norm=0, min_norm=0;
            for(size_t i=0;i<dim;++i){
                max_norm+=Iabs(Generators[nr_gen-1][i]);
                min_norm+=Iabs(Generators[0][i]);
            }
            roughness=max_norm/min_norm;
        }
        if(verbose){
            verboseOutput() << "Roughness " << roughness << endl;
        }

        // very uneven degrees make the standard triangulation large;
        // switch to bottom decomposition unless suppressed
        if(roughness >= 10 && !suppress_bottom_dec){
            do_bottom_dec=true;
            if(verbose){
                verboseOutput() << "Bottom decomposition activated" << endl;
            }
        }
    }

    if (verbose) {
        if(triangulate){
            if(isComputed(ConeProperty::Grading)){
                verboseOutput() <<"Generators sorted by degree and lexicographically" << endl;
                verboseOutput() << "Generators per degree:" << endl;
                verboseOutput() << count_in_map<Integer,long>(gen_degrees);
            }
            else
                verboseOutput() << "Generators sorted by 1-norm and lexicographically" << endl;
        }
        else{
            verboseOutput() << "Generators sorted lexicographically" << endl;
        }
    }
    keep_order=true; // the chosen order must not be changed again
}
4137
4138
//---------------------------------------------------------------------------
4139
4140
template<typename Integer>
4141
void Full_Cone<Integer>::compose_perm_gens(const vector<key_t>& perm) {
4142
order_by_perm(PermGens,perm);
4143
}
4144
4145
//---------------------------------------------------------------------------
4146
4147
// an alternative to compute() for the basic tasks that need no triangulation
template<typename Integer>
void Full_Cone<Integer>::dualize_cone(bool print_message){
// Computes support hyperplanes (and optionally pointedness / extreme rays)
// without triangulating. The triangulation flags are saved, switched off for
// the duration of the computation, and restored at the end.

    omp_start_level=omp_get_level();

    if(dim==0){
        set_zero_cone();
        return;
    }

    // DO NOT CALL do_vars_check!!

    // temporarily disable triangulation; restored before returning
    bool save_tri = do_triangulation;
    bool save_part_tri = do_partial_triangulation;
    do_triangulation = false;
    do_partial_triangulation = false;

    if(print_message) start_message();

    sort_gens_by_degree(false); // lexicographic sort only (no weights)

    if(!isComputed(ConeProperty::SupportHyperplanes))
        build_top_cone();

    if(do_pointed)
        check_pointed();

    if(do_extreme_rays) // in case we have known the support hyperplanes
        compute_extreme_rays();

    do_triangulation = save_tri;
    do_partial_triangulation = save_part_tri;
    if(print_message) end_message();
}
4182
4183
//---------------------------------------------------------------------------
4184
4185
template<typename Integer>
4186
vector<key_t> Full_Cone<Integer>::find_start_simplex() const {
4187
return Generators.max_rank_submatrix_lex();
4188
}
4189
4190
//---------------------------------------------------------------------------
4191
4192
template<typename Integer>
4193
Matrix<Integer> Full_Cone<Integer>::select_matrix_from_list(const list<vector<Integer> >& S,
4194
vector<size_t>& selection){
4195
4196
sort(selection.begin(),selection.end());
4197
assert(selection.back()<S.size());
4198
size_t i=0,j=0;
4199
size_t k=selection.size();
4200
Matrix<Integer> M(selection.size(),S.front().size());
4201
typename list<vector<Integer> >::const_iterator ll=S.begin();
4202
for(;ll!=S.end()&&i<k;++ll){
4203
if(j==selection[i]){
4204
M[i]=*ll;
4205
i++;
4206
}
4207
j++;
4208
}
4209
return M;
4210
}
4211
4212
//---------------------------------------------------------------------------
4213
4214
template<typename Integer>
void Full_Cone<Integer>::minimize_support_hyperplanes(){
// Removes redundant hyperplanes from a user-supplied set of support
// hyperplanes: the essential ones are exactly the extreme rays of the dual
// cone spanned by the hyperplanes.
    if(Support_Hyperplanes.nr_of_rows() == 0)
        return;
    if(isComputed(ConeProperty::SupportHyperplanes)){
        // already minimal; only record the count
        nrSupport_Hyperplanes=Support_Hyperplanes.nr_of_rows();
        return;
    }
    if (verbose) {
        verboseOutput() << "Minimize the given set of support hyperplanes by "
                        << "computing the extreme rays of the dual cone" << endl;
    }
    // In the dual cone the roles are swapped: our hyperplanes become the
    // generators, and our generators serve as its support hyperplanes.
    Full_Cone<Integer> Dual(Support_Hyperplanes);
    Dual.verbose=verbose;
    Dual.Support_Hyperplanes = Generators;
    Dual.is_Computed.set(ConeProperty::SupportHyperplanes);
    Dual.compute_extreme_rays();
    Support_Hyperplanes = Dual.Generators.submatrix(Dual.Extreme_Rays_Ind); //only essential hyperplanes
    is_Computed.set(ConeProperty::SupportHyperplanes);
    nrSupport_Hyperplanes=Support_Hyperplanes.nr_of_rows();
    do_all_hyperplanes=false; // they are already complete
}
4237
4238
4239
//---------------------------------------------------------------------------
4240
4241
template<typename Integer>
4242
void Full_Cone<Integer>::compute_extreme_rays(bool use_facets){
4243
4244
if (isComputed(ConeProperty::ExtremeRays))
4245
return;
4246
// when we do approximation, we do not have the correct hyperplanes
4247
// and cannot compute the extreme rays
4248
if (is_approximation)
4249
return;
4250
assert(isComputed(ConeProperty::SupportHyperplanes));
4251
4252
check_pointed();
4253
if(!pointed){
4254
throw NonpointedException();
4255
}
4256
4257
if(dim*Support_Hyperplanes.nr_of_rows() < nr_gen) {
4258
compute_extreme_rays_rank(use_facets);
4259
} else {
4260
compute_extreme_rays_compare(use_facets);
4261
}
4262
}
4263
4264
//---------------------------------------------------------------------------
4265
4266
template<typename Integer>
void Full_Cone<Integer>::compute_extreme_rays_rank(bool use_facets){
// Rank criterion: generator i is extreme iff the support hyperplanes
// containing it have rank >= dim-1.

    if (verbose) verboseOutput() << "Select extreme rays via rank ... " << flush;

    size_t i;
    vector<key_t> gen_in_hyperplanes;
    gen_in_hyperplanes.reserve(Support_Hyperplanes.nr_of_rows());
    Matrix<Integer> M(Support_Hyperplanes.nr_of_rows(),dim); // workspace for rank computations

    // deque<bool> rather than vector<bool>: elements are individual objects,
    // presumably to allow concurrent writes from the parallel loop -- confirm
    deque<bool> Ext(nr_gen,false);
    // each thread gets its own copies of the scratch containers
    #pragma omp parallel for firstprivate(gen_in_hyperplanes,M)
    for(i=0;i<nr_gen;++i){
//        if (isComputed(ConeProperty::Triangulation) && !in_triang[i])
//            continue;

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        gen_in_hyperplanes.clear();
        if(use_facets){
            // reuse the incidence data stored with the facets
            typename list<FACETDATA>::const_iterator IHV=Facets.begin();
            for (size_t j=0; j<Support_Hyperplanes.nr_of_rows(); ++j, ++IHV){
                if(IHV->GenInHyp.test(i))
                    gen_in_hyperplanes.push_back(j);
            }
        }
        else{
            // otherwise detect incidence by scalar product == 0
            for (size_t j=0; j<Support_Hyperplanes.nr_of_rows(); ++j){
                if(v_scalar_product(Generators[i],Support_Hyperplanes[j])==0)
                    gen_in_hyperplanes.push_back(j);
            }
        }
        if (gen_in_hyperplanes.size() < dim-1)
            continue; // too few hyperplanes through it: cannot be extreme
        if (M.rank_submatrix(Support_Hyperplanes,gen_in_hyperplanes) >= dim-1)
            Ext[i]=true;
    }
    for(i=0; i<nr_gen;++i)
        Extreme_Rays_Ind[i]=Ext[i];

    is_Computed.set(ConeProperty::ExtremeRays);
    if (verbose) verboseOutput() << "done." << endl;
}
4309
4310
//---------------------------------------------------------------------------
4311
4312
template<typename Integer>
void Full_Cone<Integer>::compute_extreme_rays_compare(bool use_facets){
// Comparison criterion: a generator is extreme iff its set of containing
// support hyperplanes is maximal (not a subset of another generator's set).

    if (verbose) verboseOutput() << "Select extreme rays via comparison ... " << flush;

    size_t i,j,k;
    // Matrix<Integer> SH=getSupportHyperplanes().transpose();
    // Matrix<Integer> Val=Generators.multiplication(SH);
    size_t nc=Support_Hyperplanes.nr_of_rows();

    vector<vector<bool> > Val(nr_gen);
    for (i=0;i<nr_gen;++i)
        Val[i].resize(nc);

    // In this routine Val[i][j]==1, i.e. true, indicates that
    // the i-th generator is contained in the j-th support hyperplane

    vector<key_t> Zero(nc); // NOTE(review): appears unused in this routine
    vector<key_t> nr_ones(nr_gen);

    for (i = 0; i <nr_gen; i++) {

        INTERRUPT_COMPUTATION_BY_EXCEPTION

        k=0; // counts the hyperplanes containing generator i
        Extreme_Rays_Ind[i]=true;
        if(use_facets){
            // reuse the incidence data stored with the facets
            typename list<FACETDATA>::const_iterator IHV=Facets.begin();
            for (j=0; j<Support_Hyperplanes.nr_of_rows(); ++j, ++IHV){
                if(IHV->GenInHyp.test(i)){
                    k++;
                    Val[i][j]=true;
                }
                else
                    Val[i][j]=false;
            }
        }
        else{
            // otherwise detect incidence by scalar product == 0
            for (j = 0; j <nc; ++j) {
                if (v_scalar_product(Generators[i],Support_Hyperplanes[j])==0) {
                    k++;
                    Val[i][j]=true;
                }
                else
                    Val[i][j]=false;
            }
        }
        nr_ones[i]=k;
        if (k<dim-1||k==nc)  // not contained in enough facets or in all (0 as generator)
            Extreme_Rays_Ind[i]=false;
    }

    // keep only the generators whose incidence set is maximal
    maximal_subsets(Val,Extreme_Rays_Ind);

    is_Computed.set(ConeProperty::ExtremeRays);
    if (verbose) verboseOutput() << "done." << endl;
}
4369
4370
//---------------------------------------------------------------------------
4371
4372
template<typename Integer>
4373
void Full_Cone<Integer>::compute_class_group() { // from the support hyperplanes
4374
if(!do_class_group || !isComputed(ConeProperty::SupportHyperplanes) || isComputed(ConeProperty::ClassGroup))
4375
return;
4376
Matrix<Integer> Trans=Support_Hyperplanes; // .transpose();
4377
size_t rk;
4378
Trans.SmithNormalForm(rk);
4379
ClassGroup.push_back(Support_Hyperplanes.nr_of_rows()-rk);
4380
for(size_t i=0;i<rk;++i)
4381
if(Trans[i][i]!=1)
4382
ClassGroup.push_back(Trans[i][i]);
4383
is_Computed.set(ConeProperty::ClassGroup);
4384
}
4385
4386
//---------------------------------------------------------------------------
4387
4388
template<typename Integer>
4389
void Full_Cone<Integer>::select_deg1_elements() { // from the Hilbert basis
4390
4391
if(inhomogeneous)
4392
return;
4393
typename list<vector<Integer> >::iterator h = Hilbert_Basis.begin();
4394
for(;h!=Hilbert_Basis.end();h++){
4395
if(v_scalar_product(Grading,*h)==1)
4396
Deg1_Elements.push_back(*h);
4397
}
4398
is_Computed.set(ConeProperty::Deg1Elements,true);
4399
}
4400
4401
//---------------------------------------------------------------------------
4402
4403
template<typename Integer>
4404
bool Full_Cone<Integer>::subcone_contains(const vector<Integer>& v) {
4405
for (size_t i=0; i<Subcone_Support_Hyperplanes.nr_of_rows(); ++i)
4406
if (v_scalar_product(Subcone_Support_Hyperplanes[i],v) < 0)
4407
return false;
4408
for (size_t i=0; i<Subcone_Equations.nr_of_rows(); ++i)
4409
if (v_scalar_product(Subcone_Equations[i],v) != 0)
4410
return false;
4411
if(is_global_approximation)
4412
if(v_scalar_product(Subcone_Grading,v)!=1)
4413
return false;
4414
return true;
4415
}
4416
4417
//---------------------------------------------------------------------------
4418
4419
template<typename Integer>
4420
bool Full_Cone<Integer>::contains(const vector<Integer>& v) {
4421
for (size_t i=0; i<Support_Hyperplanes.nr_of_rows(); ++i)
4422
if (v_scalar_product(Support_Hyperplanes[i],v) < 0)
4423
return false;
4424
return true;
4425
}
4426
//---------------------------------------------------------------------------
4427
4428
template<typename Integer>
4429
bool Full_Cone<Integer>::contains(const Full_Cone& C) {
4430
for(size_t i=0;i<C.nr_gen;++i)
4431
if(!contains(C.Generators[i])){
4432
cerr << "Missing generator " << C.Generators[i] << endl;
4433
return(false);
4434
}
4435
return(true);
4436
}
4437
//---------------------------------------------------------------------------
4438
4439
template<typename Integer>
4440
void Full_Cone<Integer>::select_deg1_elements(const Full_Cone& C) { // from vectors computed in
4441
// the auxiliary cone C
4442
assert(isComputed(ConeProperty::SupportHyperplanes));
4443
assert(C.isComputed(ConeProperty::Deg1Elements));
4444
typename list<vector<Integer> >::const_iterator h = C.Deg1_Elements.begin();
4445
for(;h!=C.Deg1_Elements.end();++h){
4446
if(contains(*h))
4447
Deg1_Elements.push_back(*h);
4448
}
4449
is_Computed.set(ConeProperty::Deg1Elements,true);
4450
}
4451
4452
//---------------------------------------------------------------------------
4453
4454
4455
// so far only for experiments
4456
/*
4457
template<typename Integer>
4458
void Full_Cone<Integer>::select_Hilbert_Basis(const Full_Cone& C) { // from vectors computed in
4459
// the auxiliary cone C
4460
assert(isComputed(ConeProperty::SupportHyperplanes));
4461
assert(C.isComputed(ConeProperty::Deg1Elements));
4462
typename list<vector<Integer> >::const_iterator h = C.Hilbert_Basis.begin();
4463
for(;h!=C.Hilbert_Basis.end();++h){
4464
if(contains(*h))
4465
// Deg1_Elements.push_back(*h);
4466
cout << *h;
4467
}
4468
exit(0);
4469
is_Computed.set(ConeProperty::Deg1Elements,true);
4470
}
4471
*/
4472
4473
//---------------------------------------------------------------------------
4474
4475
template<typename Integer>
void Full_Cone<Integer>::check_pointed() {
// Decides whether the cone is pointed, i.e. whether the support hyperplanes
// have full rank dim. A verified (computed) grading already implies pointed.
    if (isComputed(ConeProperty::IsPointed))
        return;
    assert(isComputed(ConeProperty::SupportHyperplanes));
    if (isComputed(ConeProperty::Grading)){
        pointed=true; // graded ==> pointed
        if (verbose) verboseOutput() << "Pointed since graded" << endl;
        is_Computed.set(ConeProperty::IsPointed);
        return;
    }
    if (verbose) verboseOutput() << "Checking pointedness ... " << flush;
    if(Support_Hyperplanes.nr_of_rows() <= dim*dim/2){
        // for few hyperplanes a direct rank computation is cheapest
        pointed = (Support_Hyperplanes.rank() == dim);
    }
    else
        pointed = (Support_Hyperplanes.max_rank_submatrix_lex().size() == dim);
    is_Computed.set(ConeProperty::IsPointed);
    // Reaching this point with Grading nonempty means the given grading could
    // not be verified as positive; on a pointed cone that is a user error.
    if(pointed && Grading.size()>0){
        throw BadInputException("Grading not positive on pointed cone.");
    }
    if (verbose) verboseOutput() << "done." << endl;
}
4498
4499
4500
//---------------------------------------------------------------------------
4501
template<typename Integer>
4502
void Full_Cone<Integer>::disable_grading_dep_comp() {
4503
4504
if (do_multiplicity || do_deg1_elements || do_h_vector) {
4505
if (do_default_mode) {
4506
// if (verbose)
4507
// verboseOutput() << "No grading specified and cannot find one. "
4508
// << "Disabling some computations!" << endl;
4509
do_deg1_elements = false;
4510
do_h_vector = false;
4511
if(!explicit_full_triang){
4512
do_triangulation=false;
4513
if(do_Hilbert_basis)
4514
do_partial_triangulation=true;
4515
}
4516
} else {
4517
throw BadInputException("No grading specified and cannot find one. Cannot compute some requested properties!");
4518
}
4519
}
4520
}
4521
4522
//---------------------------------------------------------------------------
4523
4524
template<typename Integer>
void Full_Cone<Integer>::deg1_check() {
// Tries to find a grading under which all generators (deg1_generated) or at
// least all extreme rays (deg1_extreme_rays) have degree 1, and records the
// outcome in the corresponding flags and ConeProperties.

    if(inhomogeneous) // deg 1 check disabled since it makes no sense in this case
        return;

    if (!isComputed(ConeProperty::Grading) && Grading.size()==0      // we still need it and
                && !isComputed(ConeProperty::IsDeg1ExtremeRays)) {   // we have not tried it
        if (isComputed(ConeProperty::ExtremeRays)) {
            // try to find a linear form taking value 1 on all extreme rays
            Matrix<Integer> Extreme=Generators.submatrix(Extreme_Rays_Ind);
            if (has_generator_with_common_divisor)
                Extreme.make_prime();
            try{
                Grading = Extreme.find_linear_form();
            } catch(const ArithmeticException& e) { // if the exception has been thrown, the grading has
                Grading.resize(0);   // we consider the grading as non existing -- though this may not be true
                if(verbose)
                    verboseOutput() << "Giving up the check for a grading" << endl;
            }

            if (Grading.size() == dim && v_scalar_product(Grading,Extreme[0])==1) {
                is_Computed.set(ConeProperty::Grading);
            } else {
                deg1_extreme_rays = false;
                Grading.clear();
                is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
            }
        } else // extreme rays not known
        if (!deg1_generated_computed) {
            // try to find a linear form taking value 1 on all generators
            Matrix<Integer> GenCopy = Generators;
            if (has_generator_with_common_divisor)
                GenCopy.make_prime();
            try{
                Grading = GenCopy.find_linear_form();
            } catch(const ArithmeticException& e) { // if the exception has been thrown,
                Grading.resize(0);   // we consider the grading as non existing -- though this may not be true
                if(verbose)
                    verboseOutput() << "Giving up the check for a grading" << endl;
            }
            if (Grading.size() == dim && v_scalar_product(Grading,GenCopy[0])==1) {
                is_Computed.set(ConeProperty::Grading);
            } else {
                deg1_generated = false;
                deg1_generated_computed = true;
                Grading.clear();
            }
        }
    }

    //now we hopefully have a grading

    if (!isComputed(ConeProperty::Grading)) {
        if (isComputed(ConeProperty::ExtremeRays)) {
            // there is no hope to find a grading later
            deg1_generated = false;
            deg1_generated_computed = true;
            deg1_extreme_rays = false;
            is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
            disable_grading_dep_comp();
        }
        return; // we are done
    }

    set_degrees();

    // degrees of the generators divided by common factors, so that the
    // degree-1 test is applied to the primitive generators
    vector<Integer> divided_gen_degrees = gen_degrees;
    if (has_generator_with_common_divisor) {
        Matrix<Integer> GenCopy = Generators;
        GenCopy.make_prime();
        convert(divided_gen_degrees, GenCopy.MxV(Grading));
    }

    if (!deg1_generated_computed) {
        deg1_generated = true;
        for (size_t i = 0; i < nr_gen; i++) {
            if (divided_gen_degrees[i] != 1) {
                deg1_generated = false;
                break;
            }
        }
        deg1_generated_computed = true;
        if (deg1_generated) {
            // all generators in degree 1 trivially includes the extreme rays
            deg1_extreme_rays = true;
            is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
        }
    }
    if (!isComputed(ConeProperty::IsDeg1ExtremeRays)
      && isComputed(ConeProperty::ExtremeRays)) {
        // test only the extreme rays
        deg1_extreme_rays = true;
        for (size_t i = 0; i < nr_gen; i++) {
            if (Extreme_Rays_Ind[i] && divided_gen_degrees[i] != 1) {
                deg1_extreme_rays = false;
                break;
            }
        }
        is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
    }
}
4622
4623
//---------------------------------------------------------------------------
4624
4625
template<typename Integer>
4626
void Full_Cone<Integer>::check_deg1_hilbert_basis() {
4627
if (isComputed(ConeProperty::IsDeg1HilbertBasis) || inhomogeneous)
4628
return;
4629
4630
if ( !isComputed(ConeProperty::Grading) || !isComputed(ConeProperty::HilbertBasis)) {
4631
if (verbose) {
4632
errorOutput() << "WARNING: unsatisfied preconditions in check_deg1_hilbert_basis()!" <<endl;
4633
}
4634
return;
4635
}
4636
4637
if (isComputed(ConeProperty::Deg1Elements)) {
4638
deg1_hilbert_basis = (Deg1_Elements.size() == Hilbert_Basis.size());
4639
} else {
4640
deg1_hilbert_basis = true;
4641
typename list< vector<Integer> >::iterator h;
4642
for (h = Hilbert_Basis.begin(); h != Hilbert_Basis.end(); ++h) {
4643
if (v_scalar_product((*h),Grading)!=1) {
4644
deg1_hilbert_basis = false;
4645
break;
4646
}
4647
}
4648
}
4649
is_Computed.set(ConeProperty::IsDeg1HilbertBasis);
4650
}
4651
4652
//---------------------------------------------------------------------------
4653
4654
// Computes the generators of a supercone approximating "this" by a cone over a lattice polytope
// for every vertex of the simplex, we get a matrix with the integer points of the respective Weyl chamber
template<typename Integer>
vector<list<vector<Integer>>> Full_Cone<Integer>::latt_approx() {
// Returns one list of approximation points per generator (empty for
// non-extreme generators). Works in a coordinate system in which the grading
// is the first coordinate, approximates each extreme ray there, and
// transforms the points back.

    assert(isComputed(ConeProperty::Grading));
    assert(isComputed(ConeProperty::ExtremeRays));
    Matrix<Integer> G(1,dim);
    G[0]=Grading;
    Matrix<Integer> G_copy=G;

    // Lineare_Transformation<Integer> NewBasis(G); // gives a new basis in which the grading is a coordinate
    size_t dummy;
    Matrix<Integer> U=G_copy.SmithNormalForm(dummy);   // the basis elements are the columns of U

    Integer dummy_denom;
    Matrix<Integer> T=U.invert(dummy_denom);  // T is the coordinate transformation
                                              // to the new basis: v --> Tv (in this case)
                                              // for which the grading is the FIRST coordinate

    assert(dummy_denom==1);  // for safety

    // It can happen that -Grading has become the first row of T, but we want Grading. If necessary we replace the
    // first row by its negative, and correspondingly the first column of U by its negative
    if(T[0]!=Grading){
        for(size_t i=0;i<dim;++i){
            U[i][0]*=-1;
            T[0][i]*=-1;
        }
    }
    assert(T[0] == Grading);

    Matrix<Integer> M; // NOTE(review): appears unused in the live code below
    vector<list<vector<Integer>>> approx_points;
    size_t nr_approx=0;
    for(size_t i=0;i<nr_gen;++i){
        list<vector<Integer> > approx;
        if(Extreme_Rays_Ind[i]){
            // approximate the transformed extreme ray by lattice points
            approx_simplex(T.MxV(Generators[i]),approx,1);
            // TODO: NECESSARY?
            //approx.unique()
            nr_approx = 0;
            for(auto jt=approx.begin();jt!=approx.end();++jt){ // reverse transformation
                *jt=U.MxV(*jt);
                v_make_prime(*jt);
                ++nr_approx;
            }
            // +++ TODO: REMOVE DUPLICATES MORE EFFICIENT +++
            // remove points that already appear in the lists of earlier generators
            if (nr_approx>1){
                for (size_t j=0;j<approx_points.size();j++){
                    for (auto jt=approx_points[j].begin();jt!=approx_points[j].end();++jt){
                        approx.remove(*jt);
                    }
                }
            }
        }
        approx_points.push_back(approx); // empty list for non-extreme generators
    }

    return(approx_points);
}
4731
4732
//---------------------------------------------------------------------------
4733
4734
template<typename Integer>
4735
void Full_Cone<Integer>::prepare_inclusion_exclusion() {
4736
4737
if (ExcludedFaces.nr_of_rows() == 0)
4738
return;
4739
4740
do_excluded_faces = do_h_vector || do_Stanley_dec;
4741
4742
if (verbose && !do_excluded_faces) {
4743
errorOutput() << endl << "WARNING: exluded faces, but no h-vector computation or Stanley decomposition"
4744
<< endl << "Therefore excluded faces will be ignored" << endl;
4745
}
4746
4747
if (isComputed(ConeProperty::ExcludedFaces) &&
4748
(isComputed(ConeProperty::InclusionExclusionData) || !do_excluded_faces) ) {
4749
return;
4750
}
4751
4752
// indicates which generators lie in the excluded faces
4753
vector<boost::dynamic_bitset<> > GensInExcl(ExcludedFaces.nr_of_rows());
4754
4755
for(size_t j=0;j<ExcludedFaces.nr_of_rows();++j){ // now we produce these indicators
4756
bool first_neq_0=true; // and check whether the linear forms in ExcludedFaces
4757
bool non_zero=false; // have the cone on one side
4758
GensInExcl[j].resize(nr_gen,false);
4759
for(size_t i=0; i< nr_gen;++i){
4760
Integer test=v_scalar_product(ExcludedFaces[j],Generators[i]);
4761
if(test==0){
4762
GensInExcl[j].set(i);
4763
continue;
4764
}
4765
non_zero=true;
4766
if(first_neq_0){
4767
first_neq_0=false;
4768
if(test<0){
4769
for(size_t k=0;k<dim;++k) // replace linear form by its negative
4770
ExcludedFaces[j][k]*=-1; // to get cone in positive halfspace
4771
test*=-1; // (only for error check)
4772
}
4773
}
4774
if(test<0){
4775
throw FatalException("Excluded hyperplane does not define a face.");
4776
}
4777
4778
}
4779
if(!non_zero){ // not impossible if the hyperplane contains the vector space spanned by the cone
4780
throw FatalException("Excluded face contains the full cone.");
4781
}
4782
}
4783
4784
vector<bool> essential(ExcludedFaces.nr_of_rows(),true);
4785
bool remove_one=false;
4786
for(size_t i=0;i<essential.size();++i)
4787
for(size_t j=i+1;j<essential.size();++j){
4788
if(GensInExcl[j].is_subset_of(GensInExcl[i])){
4789
essential[j]=false;
4790
remove_one=true;
4791
continue;
4792
}
4793
if(GensInExcl[i].is_subset_of(GensInExcl[j])){
4794
essential[i]=false;
4795
remove_one=true;
4796
}
4797
}
4798
if(remove_one){
4799
Matrix<Integer> Help(0,dim);
4800
vector<boost::dynamic_bitset<> > HelpGensInExcl;
4801
for(size_t i=0;i<essential.size();++i)
4802
if(essential[i]){
4803
Help.append(ExcludedFaces[i]);
4804
HelpGensInExcl.push_back(GensInExcl[i]);
4805
}
4806
ExcludedFaces=Help;
4807
GensInExcl=HelpGensInExcl;
4808
}
4809
is_Computed.set(ConeProperty::ExcludedFaces);
4810
4811
4812
4813
if (isComputed(ConeProperty::InclusionExclusionData) || !do_excluded_faces) {
4814
return;
4815
}
4816
4817
vector< pair<boost::dynamic_bitset<> , long> > InExScheme; // now we produce the formal
4818
boost::dynamic_bitset<> all_gens(nr_gen); // inclusion-exclusion scheme
4819
all_gens.set(); // by forming all intersections of
4820
// excluded faces
4821
InExScheme.push_back(pair<boost::dynamic_bitset<> , long> (all_gens, 1));
4822
size_t old_size=1;
4823
4824
for(size_t i=0;i<ExcludedFaces.nr_of_rows();++i){
4825
for(size_t j=0;j<old_size;++j)
4826
InExScheme.push_back(pair<boost::dynamic_bitset<> , long>
4827
(InExScheme[j].first & GensInExcl[i], -InExScheme[j].second));
4828
old_size*=2;
4829
}
4830
4831
vector<pair<boost::dynamic_bitset<>, long> >::iterator G;
4832
4833
InExScheme.erase(InExScheme.begin()); // remove full cone
4834
4835
// map<boost::dynamic_bitset<>, long> InExCollect;
4836
InExCollect.clear();
4837
map<boost::dynamic_bitset<>, long>::iterator F;
4838
4839
for(size_t i=0;i<old_size-1;++i){ // we compactify the list of faces
4840
F=InExCollect.find(InExScheme[i].first); // obtained as intersections
4841
if(F!=InExCollect.end()) // by listing each face only once
4842
F->second+=InExScheme[i].second; // but with the right multiplicity
4843
else
4844
InExCollect.insert(InExScheme[i]);
4845
}
4846
4847
for(F=InExCollect.begin();F!=InExCollect.end();){ // faces with multiplicity 0
4848
if(F->second==0) // can be erased
4849
InExCollect.erase(F++);
4850
else{
4851
++F;
4852
}
4853
}
4854
4855
if(verbose){
4856
verboseOutput() << endl;
4857
verboseOutput() << "InEx complete, " << InExCollect.size() << " faces involved" << endl;
4858
}
4859
4860
is_Computed.set(ConeProperty::InclusionExclusionData);
4861
}
4862
4863
4864
4865
//---------------------------------------------------------------------------
4866
4867
/* computes a degree function, s.t. every generator has value >0 */
4868
template<typename Integer>
4869
vector<Integer> Full_Cone<Integer>::compute_degree_function() const {
4870
size_t i;
4871
vector<Integer> degree_function(dim,0);
4872
if (isComputed(ConeProperty::Grading)) { //use the grading if we have one
4873
for (i=0; i<dim; i++) {
4874
degree_function[i] = Grading[i];
4875
}
4876
} else { // add hyperplanes to get a degree function
4877
if(verbose) {
4878
verboseOutput()<<"computing degree function... "<<flush;
4879
}
4880
size_t h;
4881
for (h=0; h<Support_Hyperplanes.nr_of_rows(); ++h) {
4882
for (i=0; i<dim; i++) {
4883
degree_function[i] += Support_Hyperplanes.get_elem(h,i);
4884
}
4885
}
4886
v_make_prime(degree_function);
4887
if(verbose) {
4888
verboseOutput()<<"done."<<endl;
4889
}
4890
}
4891
return degree_function;
4892
}
4893
4894
//---------------------------------------------------------------------------
4895
4896
/* adds generators, they have to lie inside the existing cone */
4897
template<typename Integer>
4898
void Full_Cone<Integer>::add_generators(const Matrix<Integer>& new_points) {
4899
is_simplicial = false;
4900
int nr_new_points = new_points.nr_of_rows();
4901
int nr_old_gen = nr_gen;
4902
Generators.append(new_points);
4903
nr_gen += nr_new_points;
4904
set_degrees();
4905
Top_Key.resize(nr_gen);
4906
Extreme_Rays_Ind.resize(nr_gen);
4907
for (size_t i=nr_old_gen; i<nr_gen; ++i) {
4908
Top_Key[i] = i;
4909
Extreme_Rays_Ind[i] = false;
4910
}
4911
// inhom cones
4912
if (inhomogeneous) {
4913
set_levels();
4914
}
4915
// excluded faces have to be reinitialized
4916
is_Computed.set(ConeProperty::ExcludedFaces, false);
4917
is_Computed.set(ConeProperty::InclusionExclusionData, false);
4918
prepare_inclusion_exclusion();
4919
4920
if (do_Hilbert_basis) {
4921
// add new points to HilbertBasis
4922
for (size_t i = nr_old_gen; i < nr_gen; ++i) {
4923
if(!inhomogeneous || gen_levels[i]<=1) {
4924
OldCandidates.Candidates.push_back(Candidate<Integer>(Generators[i],*this));
4925
OldCandidates.Candidates.back().original_generator=true;
4926
}
4927
}
4928
OldCandidates.auto_reduce();
4929
}
4930
}
4931
4932
//---------------------------------------------------------------------------
4933
// Constructors
4934
//---------------------------------------------------------------------------
4935
4936
template<typename Integer>
4937
void Full_Cone<Integer>::reset_tasks(){
4938
do_triangulation = false;
4939
do_partial_triangulation = false;
4940
do_determinants = false;
4941
do_multiplicity=false;
4942
do_integrally_closed = false;
4943
do_Hilbert_basis = false;
4944
do_deg1_elements = false;
4945
keep_triangulation = false;
4946
do_Stanley_dec=false;
4947
do_h_vector=false;
4948
do_hsop = false;
4949
do_excluded_faces=false;
4950
do_approximation=false;
4951
do_default_mode=false;
4952
do_class_group = false;
4953
do_module_gens_intcl = false;
4954
do_module_rank = false;
4955
do_cone_dec=false;
4956
stop_after_cone_dec=false;
4957
4958
do_extreme_rays=false;
4959
do_pointed=false;
4960
4961
do_evaluation = false;
4962
do_only_multiplicity=false;
4963
4964
use_bottom_points = true;
4965
4966
nrSimplicialPyr=0;
4967
totalNrPyr=0;
4968
is_pyramid = false;
4969
triangulation_is_nested = false;
4970
triangulation_is_partial = false;
4971
}
4972
4973
4974
//---------------------------------------------------------------------------
4975
4976
template<typename Integer>
4977
Full_Cone<Integer>::Full_Cone(const Matrix<Integer>& M, bool do_make_prime){ // constructor of the top cone
4978
4979
omp_start_level=omp_get_level();
4980
4981
dim=M.nr_of_columns();
4982
if(dim>0)
4983
Generators=M;
4984
/* M.pretty_print(cout);
4985
cout << "------------------" << endl;
4986
M.transpose().pretty_print(cout);
4987
cout << "==================" << endl;*/
4988
4989
// assert(M.row_echelon()== dim); rank check now done later
4990
4991
/*index=1; // not used at present
4992
for(size_t i=0;i<dim;++i)
4993
index*=M[i][i];
4994
index=Iabs(index); */
4995
4996
//make the generators coprime, remove 0 rows and duplicates
4997
has_generator_with_common_divisor = false;
4998
if (do_make_prime) {
4999
Generators.make_prime();
5000
} else {
5001
nr_gen = Generators.nr_of_rows();
5002
for (size_t i = 0; i < nr_gen; ++i) {
5003
if (v_gcd(Generators[i]) != 1) {
5004
has_generator_with_common_divisor = true;
5005
break;
5006
}
5007
}
5008
}
5009
Generators.remove_duplicate_and_zero_rows();
5010
nr_gen = Generators.nr_of_rows();
5011
5012
if (nr_gen != static_cast<size_t>(static_cast<key_t>(nr_gen))) {
5013
throw FatalException("Too many generators to fit in range of key_t!");
5014
}
5015
5016
multiplicity = 0;
5017
is_Computed = bitset<ConeProperty::EnumSize>(); //initialized to false
5018
is_Computed.set(ConeProperty::Generators);
5019
pointed = false;
5020
is_simplicial = nr_gen == dim;
5021
deg1_extreme_rays = false;
5022
deg1_generated = false;
5023
deg1_generated_computed = false;
5024
deg1_hilbert_basis = false;
5025
5026
reset_tasks();
5027
5028
Extreme_Rays_Ind = vector<bool>(nr_gen,false);
5029
in_triang = vector<bool> (nr_gen,false);
5030
deg1_triangulation = true;
5031
if(dim==0){ //correction needed to include the 0 cone;
5032
multiplicity = 1;
5033
Hilbert_Series.add(vector<num_t>(1,1),vector<denom_t>());
5034
is_Computed.set(ConeProperty::HilbertSeries);
5035
is_Computed.set(ConeProperty::Triangulation);
5036
}
5037
pyr_level=-1;
5038
Top_Cone=this;
5039
Top_Key.resize(nr_gen);
5040
for(size_t i=0;i<nr_gen;i++)
5041
Top_Key[i]=i;
5042
totalNrSimplices=0;
5043
TriangulationBufferSize=0;
5044
CandidatesSize=0;
5045
detSum = 0;
5046
shift = 0;
5047
5048
FS.resize(omp_get_max_threads());
5049
5050
Pyramids.resize(20); // prepare storage for pyramids
5051
nrPyramids.resize(20,0);
5052
Pyramids_scrambled.resize(20,false);
5053
5054
recursion_allowed=true;
5055
5056
do_all_hyperplanes=true;
5057
// multithreaded_pyramid=true; now in build_cone where it is defined dynamically
5058
5059
5060
nextGen=0;
5061
store_level=0;
5062
5063
Comparisons.reserve(nr_gen);
5064
nrTotalComparisons=0;
5065
5066
inhomogeneous=false;
5067
5068
level0_dim=dim; // must always be defined
5069
5070
use_existing_facets=false;
5071
start_from=0;
5072
old_nr_supp_hyps=0;
5073
5074
verbose=false;
5075
OldCandidates.dual=false;
5076
OldCandidates.verbose=verbose;
5077
NewCandidates.dual=false;
5078
NewCandidates.verbose=verbose;
5079
5080
RankTest = vector< Matrix<Integer> >(omp_get_max_threads(), Matrix<Integer>(0,dim));
5081
5082
do_bottom_dec=false;
5083
suppress_bottom_dec=false;
5084
keep_order=false;
5085
5086
is_approximation=false;
5087
is_global_approximation=false;
5088
5089
PermGens.resize(nr_gen);
5090
for(size_t i=0;i<nr_gen;++i)
5091
PermGens[i]=i;
5092
}
5093
5094
//---------------------------------------------------------------------------
5095
5096
template<typename Integer>
5097
Full_Cone<Integer>::Full_Cone(Cone_Dual_Mode<Integer> &C) {
5098
5099
omp_start_level=omp_get_level();
5100
5101
is_Computed = bitset<ConeProperty::EnumSize>(); //initialized to false
5102
5103
dim = C.dim;
5104
Generators.swap(C.Generators);
5105
nr_gen = Generators.nr_of_rows();
5106
if (Generators.nr_of_rows() > 0)
5107
is_Computed.set(ConeProperty::Generators);
5108
has_generator_with_common_divisor = false;
5109
Extreme_Rays_Ind.swap(C.ExtremeRaysInd);
5110
if (!Extreme_Rays_Ind.empty()) is_Computed.set(ConeProperty::ExtremeRays);
5111
5112
multiplicity = 0;
5113
in_triang = vector<bool>(nr_gen,false);
5114
5115
Basis_Max_Subspace=C.BasisMaxSubspace;
5116
is_Computed.set(ConeProperty::MaximalSubspace);
5117
pointed = (Basis_Max_Subspace.nr_of_rows()==0);
5118
is_Computed.set(ConeProperty::IsPointed);
5119
is_simplicial = nr_gen == dim;
5120
deg1_extreme_rays = false;
5121
deg1_generated = false;
5122
deg1_generated_computed = false;
5123
deg1_triangulation = false;
5124
deg1_hilbert_basis = false;
5125
5126
reset_tasks();
5127
5128
if (!Extreme_Rays_Ind.empty()) { // only then we can assume that all entries on C.Supp.. are relevant
5129
Support_Hyperplanes.swap(C.SupportHyperplanes);
5130
// there may be duplicates in the coordinates of the Full_Cone
5131
Support_Hyperplanes.remove_duplicate_and_zero_rows();
5132
is_Computed.set(ConeProperty::SupportHyperplanes);
5133
}
5134
if(!C.do_only_Deg1_Elements){
5135
Hilbert_Basis.swap(C.Hilbert_Basis);
5136
is_Computed.set(ConeProperty::HilbertBasis);
5137
}
5138
else {
5139
Deg1_Elements.swap(C.Hilbert_Basis);
5140
is_Computed.set(ConeProperty::Deg1Elements);
5141
}
5142
if(dim==0){ //correction needed to include the 0 cone;
5143
multiplicity = 1;
5144
Hilbert_Series.add(vector<num_t>(1,1),vector<denom_t>());
5145
is_Computed.set(ConeProperty::HilbertSeries);
5146
}
5147
pyr_level=-1;
5148
Top_Cone=this;
5149
Top_Key.resize(nr_gen);
5150
for(size_t i=0;i<nr_gen;i++)
5151
Top_Key[i]=i;
5152
totalNrSimplices=0;
5153
TriangulationBufferSize=0;
5154
CandidatesSize=0;
5155
detSum = 0;
5156
shift = 0;
5157
5158
do_all_hyperplanes=true;
5159
5160
tri_recursion=false;
5161
5162
nextGen=0;
5163
5164
inhomogeneous=C.inhomogeneous;
5165
5166
level0_dim=dim; // must always be defined
5167
5168
use_existing_facets=false;
5169
start_from=0;
5170
old_nr_supp_hyps=0;
5171
verbose = C.verbose;
5172
OldCandidates.dual=false;
5173
OldCandidates.verbose=verbose;
5174
NewCandidates.dual=false;
5175
NewCandidates.verbose=verbose;
5176
5177
is_approximation=false;
5178
5179
verbose=C.verbose;
5180
}
5181
5182
//---------------------------------------------------------------------------
5183
5184
template<typename Integer>
5185
void Full_Cone<Integer>::check_grading_after_dual_mode(){
5186
5187
if(dim>0 && Grading.size()>0 && !isComputed(ConeProperty::Grading)) {
5188
if(isComputed(ConeProperty::Generators)){
5189
vector<Integer> degrees=Generators.MxV(Grading);
5190
vector<Integer> levels;
5191
if(inhomogeneous)
5192
levels=Generators.MxV(Truncation);
5193
size_t i=0;
5194
for(;i<degrees.size();++i){
5195
if(degrees[i]<=0 &&(!inhomogeneous || levels[i]==0))
5196
break;
5197
}
5198
if(i==degrees.size())
5199
is_Computed.set(ConeProperty::Grading);
5200
}
5201
else if(isComputed(ConeProperty::HilbertBasis)){
5202
auto hb=Hilbert_Basis.begin();
5203
for(;hb!=Hilbert_Basis.end();++hb){
5204
if(v_scalar_product(*hb,Grading)<=0 && (!inhomogeneous || v_scalar_product(*hb,Truncation)==0))
5205
break;
5206
}
5207
if(hb==Hilbert_Basis.end())
5208
is_Computed.set(ConeProperty::Grading);
5209
}
5210
}
5211
if(isComputed(ConeProperty::Deg1Elements)){
5212
auto hb=Deg1_Elements.begin();
5213
for(;hb!=Deg1_Elements.end();++hb){
5214
if(v_scalar_product(*hb,Grading)<=0)
5215
break;
5216
}
5217
if(hb==Deg1_Elements.end())
5218
is_Computed.set(ConeProperty::Grading);
5219
}
5220
5221
if(Grading.size()>0 && !isComputed(ConeProperty::Grading)){
5222
throw BadInputException("Grading not positive on pointed cone.");
5223
}
5224
}
5225
5226
template<typename Integer>
5227
void Full_Cone<Integer>::dual_mode() {
5228
5229
omp_start_level=omp_get_level();
5230
5231
if(dim==0){
5232
set_zero_cone();
5233
return;
5234
}
5235
5236
use_existing_facets=false; // completely irrelevant here
5237
start_from=0;
5238
old_nr_supp_hyps=0;
5239
5240
INTERRUPT_COMPUTATION_BY_EXCEPTION
5241
5242
compute_class_group();
5243
5244
check_grading_after_dual_mode();
5245
5246
if(dim>0 && !inhomogeneous){
5247
deg1_check();
5248
if (isComputed(ConeProperty::Grading) && !isComputed(ConeProperty::Deg1Elements)) {
5249
if (verbose) {
5250
verboseOutput() << "Find degree 1 elements" << endl;
5251
}
5252
select_deg1_elements();
5253
}
5254
}
5255
5256
if(dim==0){
5257
deg1_extreme_rays = deg1_generated = true;
5258
Grading=vector<Integer>(dim);
5259
is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
5260
deg1_generated_computed = true;
5261
is_Computed.set(ConeProperty::Grading);
5262
}
5263
if(!inhomogeneous && isComputed(ConeProperty::HilbertBasis)){
5264
if (isComputed(ConeProperty::Grading)) check_deg1_hilbert_basis();
5265
}
5266
5267
if (inhomogeneous && isComputed(ConeProperty::Generators)) {
5268
set_levels();
5269
find_level0_dim();
5270
find_module_rank();
5271
}
5272
5273
use_existing_facets=false;
5274
start_from=0;
5275
}
5276
5277
//---------------------------------------------------------------------------
5278
5279
/* constructor for pyramids */
5280
template<typename Integer>
5281
Full_Cone<Integer>::Full_Cone(Full_Cone<Integer>& C, const vector<key_t>& Key) {
5282
5283
omp_start_level=C.omp_start_level;
5284
5285
Generators = C.Generators.submatrix(Key);
5286
dim = Generators.nr_of_columns();
5287
nr_gen = Generators.nr_of_rows();
5288
has_generator_with_common_divisor = C.has_generator_with_common_divisor;
5289
is_simplicial = nr_gen == dim;
5290
5291
Top_Cone=C.Top_Cone; // relate to top cone
5292
Top_Key.resize(nr_gen);
5293
for(size_t i=0;i<nr_gen;i++)
5294
Top_Key[i]=C.Top_Key[Key[i]];
5295
5296
multiplicity = 0;
5297
5298
Extreme_Rays_Ind = vector<bool>(nr_gen,false);
5299
is_Computed.set(ConeProperty::ExtremeRays, C.isComputed(ConeProperty::ExtremeRays));
5300
if(isComputed(ConeProperty::ExtremeRays))
5301
for(size_t i=0;i<nr_gen;i++)
5302
Extreme_Rays_Ind[i]=C.Extreme_Rays_Ind[Key[i]];
5303
in_triang = vector<bool> (nr_gen,false);
5304
deg1_triangulation = true;
5305
5306
// not used in a pyramid, but set precaution
5307
deg1_extreme_rays = false;
5308
deg1_generated = false;
5309
deg1_generated_computed = false;
5310
deg1_hilbert_basis = false;
5311
5312
Grading=C.Grading;
5313
is_Computed.set(ConeProperty::Grading, C.isComputed(ConeProperty::Grading));
5314
Order_Vector=C.Order_Vector;
5315
5316
do_extreme_rays=false;
5317
do_triangulation=C.do_triangulation;
5318
do_partial_triangulation=C.do_partial_triangulation;
5319
do_determinants=C.do_determinants;
5320
do_multiplicity=C.do_multiplicity;
5321
do_deg1_elements=C.do_deg1_elements;
5322
do_h_vector=C.do_h_vector;
5323
do_Hilbert_basis=C.do_Hilbert_basis;
5324
keep_triangulation=C.keep_triangulation;
5325
do_only_multiplicity=C.do_only_multiplicity;
5326
do_evaluation=C.do_evaluation;
5327
do_Stanley_dec=C.do_Stanley_dec;
5328
inhomogeneous=C.inhomogeneous; // at present not used in proper pyramids
5329
is_pyramid=true;
5330
5331
pyr_level=C.pyr_level+1;
5332
5333
totalNrSimplices=0;
5334
detSum = 0;
5335
shift = C.shift;
5336
if(C.gen_degrees.size()>0){ // now we copy the degrees
5337
gen_degrees.resize(nr_gen);
5338
for (size_t i=0; i<nr_gen; i++) {
5339
gen_degrees[i] = C.gen_degrees[Key[i]];
5340
}
5341
}
5342
if(C.gen_levels.size()>0){ // now we copy the levels
5343
gen_levels.resize(nr_gen);
5344
for (size_t i=0; i<nr_gen; i++) {
5345
gen_levels[i] = C.gen_levels[Key[i]];
5346
}
5347
}
5348
TriangulationBufferSize=0;
5349
CandidatesSize=0;
5350
5351
recursion_allowed=C.recursion_allowed; // must be reset if necessary
5352
do_all_hyperplanes=true; // must be reset for non-recursive pyramids
5353
// multithreaded_pyramid=false; // SEE ABOVE
5354
5355
nextGen=0;
5356
store_level = C.store_level;
5357
5358
Comparisons.reserve(nr_gen);
5359
nrTotalComparisons=0;
5360
5361
level0_dim = C.level0_dim; // must always be defined
5362
5363
use_existing_facets=false;
5364
start_from=0;
5365
old_nr_supp_hyps=0;
5366
verbose=false;
5367
OldCandidates.dual=false;
5368
OldCandidates.verbose=verbose;
5369
NewCandidates.dual=false;
5370
NewCandidates.verbose=verbose;
5371
5372
is_approximation = C.is_approximation;
5373
is_global_approximation = C.is_global_approximation;
5374
5375
do_bottom_dec=false;
5376
suppress_bottom_dec=false;
5377
keep_order=true;
5378
}
5379
5380
//---------------------------------------------------------------------------
5381
5382
template<typename Integer>
5383
void Full_Cone<Integer>::set_zero_cone() {
5384
5385
assert(dim==0);
5386
5387
if (verbose) {
5388
verboseOutput() << "Zero cone detected!" << endl;
5389
}
5390
5391
// The basis change already is transforming to zero.
5392
is_Computed.set(ConeProperty::Sublattice);
5393
is_Computed.set(ConeProperty::Generators);
5394
is_Computed.set(ConeProperty::ExtremeRays);
5395
Support_Hyperplanes=Matrix<Integer> (0);
5396
is_Computed.set(ConeProperty::SupportHyperplanes);
5397
totalNrSimplices = 0;
5398
is_Computed.set(ConeProperty::TriangulationSize);
5399
detSum = 0;
5400
is_Computed.set(ConeProperty::TriangulationDetSum);
5401
is_Computed.set(ConeProperty::Triangulation);
5402
is_Computed.set(ConeProperty::StanleyDec);
5403
multiplicity = 1;
5404
is_Computed.set(ConeProperty::Multiplicity);
5405
is_Computed.set(ConeProperty::HilbertBasis);
5406
is_Computed.set(ConeProperty::Deg1Elements);
5407
5408
Hilbert_Series = HilbertSeries(vector<num_t>(1,1),vector<denom_t>()); // 1/1
5409
is_Computed.set(ConeProperty::HilbertSeries);
5410
5411
if (!is_Computed.test(ConeProperty::Grading)) {
5412
Grading = vector<Integer>(dim);
5413
// GradingDenom = 1;
5414
is_Computed.set(ConeProperty::Grading);
5415
}
5416
5417
pointed = true;
5418
is_Computed.set(ConeProperty::IsPointed);
5419
5420
deg1_extreme_rays = true;
5421
is_Computed.set(ConeProperty::IsDeg1ExtremeRays);
5422
5423
deg1_hilbert_basis = true;
5424
is_Computed.set(ConeProperty::IsDeg1HilbertBasis);
5425
5426
if (inhomogeneous) { // empty set of solutions
5427
is_Computed.set(ConeProperty::VerticesOfPolyhedron);
5428
module_rank = 0;
5429
is_Computed.set(ConeProperty::ModuleRank);
5430
is_Computed.set(ConeProperty::ModuleGenerators);
5431
level0_dim=0;
5432
is_Computed.set(ConeProperty::RecessionRank);
5433
}
5434
5435
if (!inhomogeneous) {
5436
ClassGroup.resize(1,0);
5437
is_Computed.set(ConeProperty::ClassGroup);
5438
}
5439
5440
if (inhomogeneous || ExcludedFaces.nr_of_rows() != 0) {
5441
multiplicity = 0;
5442
is_Computed.set(ConeProperty::Multiplicity);
5443
Hilbert_Series.reset(); // 0/1
5444
is_Computed.set(ConeProperty::HilbertSeries);
5445
}
5446
}
5447
5448
//---------------------------------------------------------------------------
5449
5450
template<typename Integer>
5451
bool Full_Cone<Integer>::isComputed(ConeProperty::Enum prop) const{
5452
return is_Computed.test(prop);
5453
}
5454
5455
//---------------------------------------------------------------------------
5456
// Data access
5457
//---------------------------------------------------------------------------
5458
5459
template<typename Integer>
5460
size_t Full_Cone<Integer>::getDimension()const{
5461
return dim;
5462
}
5463
5464
//---------------------------------------------------------------------------
5465
5466
template<typename Integer>
5467
size_t Full_Cone<Integer>::getNrGenerators()const{
5468
return nr_gen;
5469
}
5470
5471
//---------------------------------------------------------------------------
5472
5473
template<typename Integer>
5474
bool Full_Cone<Integer>::isPointed()const{
5475
return pointed;
5476
}
5477
5478
//---------------------------------------------------------------------------
5479
5480
template<typename Integer>
5481
bool Full_Cone<Integer>::isDeg1ExtremeRays() const{
5482
return deg1_extreme_rays;
5483
}
5484
5485
template<typename Integer>
5486
bool Full_Cone<Integer>::isDeg1HilbertBasis() const{
5487
return deg1_hilbert_basis;
5488
}
5489
5490
//---------------------------------------------------------------------------
5491
5492
template<typename Integer>
5493
vector<Integer> Full_Cone<Integer>::getGrading() const{
5494
return Grading;
5495
}
5496
5497
//---------------------------------------------------------------------------
5498
5499
template<typename Integer>
5500
mpq_class Full_Cone<Integer>::getMultiplicity()const{
5501
return multiplicity;
5502
}
5503
5504
//---------------------------------------------------------------------------
5505
5506
template<typename Integer>
5507
Integer Full_Cone<Integer>::getShift()const{
5508
return shift;
5509
}
5510
5511
//---------------------------------------------------------------------------
5512
5513
template<typename Integer>
5514
size_t Full_Cone<Integer>::getModuleRank()const{
5515
return module_rank;
5516
}
5517
5518
5519
//---------------------------------------------------------------------------
5520
5521
template<typename Integer>
5522
const Matrix<Integer>& Full_Cone<Integer>::getGenerators()const{
5523
return Generators;
5524
}
5525
5526
//---------------------------------------------------------------------------
5527
5528
template<typename Integer>
5529
vector<bool> Full_Cone<Integer>::getExtremeRays()const{
5530
return Extreme_Rays_Ind;
5531
}
5532
5533
//---------------------------------------------------------------------------
5534
5535
template<typename Integer>
5536
Matrix<Integer> Full_Cone<Integer>::getSupportHyperplanes()const{
5537
return Support_Hyperplanes;
5538
}
5539
5540
//---------------------------------------------------------------------------
5541
5542
template<typename Integer>
5543
Matrix<Integer> Full_Cone<Integer>::getHilbertBasis()const{
5544
if(Hilbert_Basis.empty())
5545
return Matrix<Integer>(0,dim);
5546
else
5547
return Matrix<Integer>(Hilbert_Basis);
5548
}
5549
5550
//---------------------------------------------------------------------------
5551
5552
template<typename Integer>
5553
Matrix<Integer> Full_Cone<Integer>::getModuleGeneratorsOverOriginalMonoid()const{
5554
if(ModuleGeneratorsOverOriginalMonoid.empty())
5555
return Matrix<Integer>(0,dim);
5556
else
5557
return Matrix<Integer>(ModuleGeneratorsOverOriginalMonoid);
5558
}
5559
5560
5561
//---------------------------------------------------------------------------
5562
5563
template<typename Integer>
5564
Matrix<Integer> Full_Cone<Integer>::getDeg1Elements()const{
5565
if(Deg1_Elements.empty())
5566
return Matrix<Integer>(0,dim);
5567
else
5568
return Matrix<Integer>(Deg1_Elements);
5569
}
5570
5571
//---------------------------------------------------------------------------
5572
5573
template<typename Integer>
5574
Matrix<Integer> Full_Cone<Integer>::getExcludedFaces()const{
5575
return(ExcludedFaces);
5576
}
5577
5578
//---------------------------------------------------------------------------
5579
5580
template<typename Integer>
5581
void Full_Cone<Integer>::error_msg(string s) const{
5582
errorOutput() <<"\nFull Cone "<< s<<"\n";
5583
}
5584
5585
//---------------------------------------------------------------------------
5586
5587
template<typename Integer>
5588
void Full_Cone<Integer>::print()const{
5589
verboseOutput()<<"\ndim="<<dim<<".\n";
5590
verboseOutput()<<"\nnr_gen="<<nr_gen<<".\n";
5591
// verboseOutput()<<"\nhyp_size="<<hyp_size<<".\n";
5592
verboseOutput()<<"\nGrading is:\n";
5593
verboseOutput()<< Grading;
5594
verboseOutput()<<"\nMultiplicity is "<<multiplicity<<".\n";
5595
verboseOutput()<<"\nGenerators are:\n";
5596
Generators.pretty_print(verboseOutput());
5597
verboseOutput()<<"\nExtreme_rays are:\n";
5598
verboseOutput()<< Extreme_Rays_Ind;
5599
verboseOutput()<<"\nSupport Hyperplanes are:\n";
5600
Support_Hyperplanes.pretty_print(verboseOutput());
5601
verboseOutput()<<"\nHilbert basis is:\n";
5602
verboseOutput()<< Hilbert_Basis;
5603
verboseOutput()<<"\nDeg1 elements are:\n";
5604
verboseOutput()<< Deg1_Elements;
5605
verboseOutput()<<"\nHilbert Series is:\n";
5606
verboseOutput()<<Hilbert_Series;
5607
}
5608
5609
} //end namespace
5610
5611