//===- ValueEnumerator.cpp - Number values and types for bitcode writer ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ValueEnumerator class.
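// ValueEnumerator walks a Module and assigns dense integer IDs to its
// values, types, metadata, and attribute lists so that the bitcode writer
// can reference them compactly.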
//
//===----------------------------------------------------------------------===//

#include "ValueEnumerator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalIFunc.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/UseListOrder.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

namespace {

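/// Temporary per-module numbering used to predict use-list order.  IDs are
/// assigned in three contiguous ranges: global constants first (up to
/// LastGlobalConstantID), then global values (up to LastGlobalValueID), then
/// function-local values.  The bool stored alongside each ID records whether
/// the value's use-list order has already been predicted.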
struct OrderMap {
  DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
  unsigned LastGlobalConstantID = 0;
  unsigned LastGlobalValueID = 0;

  OrderMap() = default;

  bool isGlobalConstant(unsigned ID) const {
    return ID <= LastGlobalConstantID;
  }

  bool isGlobalValue(unsigned ID) const {
    return ID <= LastGlobalValueID && !isGlobalConstant(ID);
  }

  unsigned size() const { return IDs.size(); }
  std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }

  std::pair<unsigned, bool> lookup(const Value *V) const {
    return IDs.lookup(V);
  }

  void index(const Value *V) {
    // Explicitly sequence get-size and insert-value operations to avoid UB.
    unsigned ID = IDs.size() + 1;
    IDs[V].first = ID;
  }
};

} // end anonymous namespace

static void orderValue(const Value *V, OrderMap &OM) {
  if (OM.lookup(V).first)
    return;

  if (const Constant *C = dyn_cast<Constant>(V))
    if (C->getNumOperands() && !isa<GlobalValue>(C))
      for (const Value *Op : C->operands())
        if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
          orderValue(Op, OM);

  // Note: we cannot cache this lookup above, since inserting into the map
  // changes the map's size, and thus affects the other IDs.
  OM.index(V);
}

static OrderMap orderModule(const Module &M) {
  // This needs to match the order used by ValueEnumerator::ValueEnumerator()
  // and ValueEnumerator::incorporateFunction().
  OrderMap OM;

  // In the reader, initializers of GlobalValues are set *after* all the
  // globals have been read.  Rather than awkwardly modeling this behaviour
  // directly in predictValueUseListOrderImpl(), just assign IDs to
  // initializers of GlobalValues before GlobalValues themselves to model this
  // implicitly.
  for (const GlobalVariable &G : M.globals())
    if (G.hasInitializer())
      if (!isa<GlobalValue>(G.getInitializer()))
        orderValue(G.getInitializer(), OM);
  for (const GlobalAlias &A : M.aliases())
    if (!isa<GlobalValue>(A.getAliasee()))
      orderValue(A.getAliasee(), OM);
  for (const GlobalIFunc &I : M.ifuncs())
    if (!isa<GlobalValue>(I.getResolver()))
      orderValue(I.getResolver(), OM);
  for (const Function &F : M) {
    for (const Use &U : F.operands())
      if (!isa<GlobalValue>(U.get()))
        orderValue(U.get(), OM);
  }
  OM.LastGlobalConstantID = OM.size();

  // Initializers of GlobalValues are processed in
  // BitcodeReader::ResolveGlobalAndAliasInits().  Match the order there rather
  // than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
  // by giving IDs in reverse order.
  //
  // Since GlobalValues never reference each other directly (just through
  // initializers), their relative IDs only matter for determining order of
  // uses in their initializers.
  for (const Function &F : M)
    orderValue(&F, OM);
  for (const GlobalAlias &A : M.aliases())
    orderValue(&A, OM);
  for (const GlobalIFunc &I : M.ifuncs())
    orderValue(&I, OM);
  for (const GlobalVariable &G : M.globals())
    orderValue(&G, OM);
  OM.LastGlobalValueID = OM.size();

  for (const Function &F : M) {
    if (F.isDeclaration())
      continue;
    // Here we need to match the union of ValueEnumerator::incorporateFunction()
    // and WriteFunction().  Basic blocks are implicitly declared before
    // anything else (by declaring their size).
    for (const BasicBlock &BB : F)
      orderValue(&BB, OM);
    for (const Argument &A : F.args())
      orderValue(&A, OM);
    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB)
        for (const Value *Op : I.operands())
          if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
              isa<InlineAsm>(*Op))
            orderValue(Op, OM);
    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB)
        orderValue(&I, OM);
  }
  return OM;
}

static void predictValueUseListOrderImpl(const Value *V, const Function *F,
                                         unsigned ID, const OrderMap &OM,
                                         UseListOrderStack &Stack) {
  // Predict use-list order for this one.
  using Entry = std::pair<const Use *, unsigned>;
  SmallVector<Entry, 64> List;
  for (const Use &U : V->uses())
    // Check if this user will be serialized.
    if (OM.lookup(U.getUser()).first)
      List.push_back(std::make_pair(&U, List.size()));

  if (List.size() < 2)
    // We may have lost some users.
    return;

  bool IsGlobalValue = OM.isGlobalValue(ID);
  std::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
    const Use *LU = L.first;
    const Use *RU = R.first;
    if (LU == RU)
      return false;

    auto LID = OM.lookup(LU->getUser()).first;
    auto RID = OM.lookup(RU->getUser()).first;

    // Global values are processed in reverse order.
    //
    // Moreover, initializers of GlobalValues are set *after* all the globals
    // have been read (despite having earlier IDs).  Rather than awkwardly
    // modeling this behaviour here, orderModule() has assigned IDs to
    // initializers of GlobalValues before GlobalValues themselves.
    if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID))
      return LID < RID;

    // If ID is 4, then expect: 7 6 5 1 2 3.
    if (LID < RID) {
      if (RID <= ID)
        if (!IsGlobalValue) // GlobalValue uses don't get reversed.
          return true;
      return false;
    }
    if (RID < LID) {
      if (LID <= ID)
        if (!IsGlobalValue) // GlobalValue uses don't get reversed.
          return false;
      return true;
    }

    // LID and RID are equal, so we have different operands of the same user.
    // Assume operands are added in order for all instructions.
    if (LID <= ID)
      if (!IsGlobalValue) // GlobalValue uses don't get reversed.
        return LU->getOperandNo() < RU->getOperandNo();
    return LU->getOperandNo() > RU->getOperandNo();
  });

  if (std::is_sorted(
          List.begin(), List.end(),
          [](const Entry &L, const Entry &R) { return L.second < R.second; }))
    // Order is already correct.
    return;

  // Store the shuffle: Shuffle[NewIndex] records each use's original
  // position in the use-list.
  Stack.emplace_back(V, F, List.size());
  assert(List.size() == Stack.back().Shuffle.size() && "Wrong size");
  for (size_t I = 0, E = List.size(); I != E; ++I)
    Stack.back().Shuffle[I] = List[I].second;
}

static void predictValueUseListOrder(const Value *V, const Function *F,
                                     OrderMap &OM, UseListOrderStack &Stack) {
  auto &IDPair = OM[V];
  assert(IDPair.first && "Unmapped value");
  if (IDPair.second)
    // Already predicted.
    return;

  // Do the actual prediction.
  IDPair.second = true;
  if (!V->use_empty() && std::next(V->use_begin()) != V->use_end())
    predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);

  // Recursive descent into constants.
  if (const Constant *C = dyn_cast<Constant>(V))
    if (C->getNumOperands()) // Visit GlobalValues.
      for (const Value *Op : C->operands())
        if (isa<Constant>(Op)) // Visit GlobalValues.
          predictValueUseListOrder(Op, F, OM, Stack);
}

static UseListOrderStack predictUseListOrder(const Module &M) {
  OrderMap OM = orderModule(M);

  // Use-list orders need to be serialized after all the users have been added
  // to a value, or else the shuffles will be incomplete.  Store them per
  // function in a stack.
  //
  // Aside from function order, the order of values doesn't matter much here.
  UseListOrderStack Stack;

  // We want to visit the functions backward now so we can list function-local
  // constants in the last Function they're used in.  Module-level constants
  // have already been visited above.
  for (auto I = M.rbegin(), E = M.rend(); I != E; ++I) {
    const Function &F = *I;
    if (F.isDeclaration())
      continue;
    for (const BasicBlock &BB : F)
      predictValueUseListOrder(&BB, &F, OM, Stack);
    for (const Argument &A : F.args())
      predictValueUseListOrder(&A, &F, OM, Stack);
    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB)
        for (const Value *Op : I.operands())
          if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
            predictValueUseListOrder(Op, &F, OM, Stack);
    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB)
        predictValueUseListOrder(&I, &F, OM, Stack);
  }

  // Visit globals last, since the module-level use-list block will be seen
  // before the function bodies are processed.
  for (const GlobalVariable &G : M.globals())
    predictValueUseListOrder(&G, nullptr, OM, Stack);
  for (const Function &F : M)
    predictValueUseListOrder(&F, nullptr, OM, Stack);
  for (const GlobalAlias &A : M.aliases())
    predictValueUseListOrder(&A, nullptr, OM, Stack);
  for (const GlobalIFunc &I : M.ifuncs())
    predictValueUseListOrder(&I, nullptr, OM, Stack);
  for (const GlobalVariable &G : M.globals())
    if (G.hasInitializer())
      predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack);
  for (const GlobalAlias &A : M.aliases())
    predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack);
  for (const GlobalIFunc &I : M.ifuncs())
    predictValueUseListOrder(I.getResolver(), nullptr, OM, Stack);
  for (const Function &F : M) {
    for (const Use &U : F.operands())
      predictValueUseListOrder(U.get(), nullptr, OM, Stack);
  }

  return Stack;
}

static bool isIntOrIntVectorValue(const std::pair<const Value*, unsigned> &V) {
  return V.first->getType()->isIntOrIntVectorTy();
}

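// The enumeration order below must stay in sync with orderModule() above and
// with the order the bitcode writer emits blocks.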
ValueEnumerator::ValueEnumerator(const Module &M,
                                 bool ShouldPreserveUseListOrder)
    : ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {
  if (ShouldPreserveUseListOrder)
    UseListOrders = predictUseListOrder(M);

  // Enumerate the global variables.
  for (const GlobalVariable &GV : M.globals())
    EnumerateValue(&GV);

  // Enumerate the functions.
  for (const Function &F : M) {
    EnumerateValue(&F);
    EnumerateAttributes(F.getAttributes());
  }

  // Enumerate the aliases.
  for (const GlobalAlias &GA : M.aliases())
    EnumerateValue(&GA);

  // Enumerate the ifuncs.
  for (const GlobalIFunc &GIF : M.ifuncs())
    EnumerateValue(&GIF);

  // Remember the cutoff between global values and other constants.
  unsigned FirstConstant = Values.size();

  // Enumerate the global variable initializers and attributes.
  for (const GlobalVariable &GV : M.globals()) {
    if (GV.hasInitializer())
      EnumerateValue(GV.getInitializer());
    if (GV.hasAttributes())
      EnumerateAttributes(GV.getAttributesAsList(AttributeList::FunctionIndex));
  }

  // Enumerate the aliasees.
  for (const GlobalAlias &GA : M.aliases())
    EnumerateValue(GA.getAliasee());

  // Enumerate the ifunc resolvers.
  for (const GlobalIFunc &GIF : M.ifuncs())
    EnumerateValue(GIF.getResolver());

  // Enumerate any optional Function data.
  for (const Function &F : M)
    for (const Use &U : F.operands())
      EnumerateValue(U.get());

  // Enumerate the metadata type.
  //
  // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode
  // only encodes the metadata type when it's used as a value.
  EnumerateType(Type::getMetadataTy(M.getContext()));

  // Insert constants and metadata that are named at module level into the slot
  // pool so that the module symbol table can refer to them...
  EnumerateValueSymbolTable(M.getValueSymbolTable());
  EnumerateNamedMetadata(M);

  SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
  for (const GlobalVariable &GV : M.globals()) {
    MDs.clear();
    GV.getAllMetadata(MDs);
    for (const auto &I : MDs)
      // FIXME: Pass GV to EnumerateMetadata and arrange for the bitcode writer
      // to write metadata to the global variable's own metadata block
      // (PR28134).
      EnumerateMetadata(nullptr, I.second);
  }

  // Enumerate types used by function bodies and argument lists.
  for (const Function &F : M) {
    for (const Argument &A : F.args())
      EnumerateType(A.getType());

    // Enumerate metadata attached to this function.
    MDs.clear();
    F.getAllMetadata(MDs);
    for (const auto &I : MDs)
      EnumerateMetadata(F.isDeclaration() ? nullptr : &F, I.second);

    for (const BasicBlock &BB : F)
      for (const Instruction &I : BB) {
        for (const Use &Op : I.operands()) {
          auto *MD = dyn_cast<MetadataAsValue>(&Op);
          if (!MD) {
            EnumerateOperandType(Op);
            continue;
          }

          // Local metadata is enumerated during function-incorporation.
          if (isa<LocalAsMetadata>(MD->getMetadata()))
            continue;

          EnumerateMetadata(&F, MD->getMetadata());
        }
        EnumerateType(I.getType());
        if (const CallInst *CI = dyn_cast<CallInst>(&I))
          EnumerateAttributes(CI->getAttributes());
        else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I))
          EnumerateAttributes(II->getAttributes());

        // Enumerate metadata attached with this instruction.
        MDs.clear();
        I.getAllMetadataOtherThanDebugLoc(MDs);
        for (unsigned i = 0, e = MDs.size(); i != e; ++i)
          EnumerateMetadata(&F, MDs[i].second);

        // Don't enumerate the location directly -- it has a special record
        // type -- but enumerate its operands.
        if (DILocation *L = I.getDebugLoc())
          for (const Metadata *Op : L->operands())
            EnumerateMetadata(&F, Op);
      }
  }

  // Optimize constant ordering.
  OptimizeConstants(FirstConstant, Values.size());

  // Organize metadata ordering.
  organizeMetadata();
}

unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
  InstructionMapType::const_iterator I = InstructionMap.find(Inst);
  assert(I != InstructionMap.end() && "Instruction is not mapped!");
  return I->second;
}

unsigned ValueEnumerator::getComdatID(const Comdat *C) const {
  unsigned ComdatID = Comdats.idFor(C);
  assert(ComdatID && "Comdat not found!");
  return ComdatID;
}

void ValueEnumerator::setInstructionID(const Instruction *I) {
  InstructionMap[I] = InstructionCount++;
}

unsigned ValueEnumerator::getValueID(const Value *V) const {
  if (auto *MD = dyn_cast<MetadataAsValue>(V))
    return getMetadataID(MD->getMetadata());

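  // ValueMap stores IDs 1-based so that 0 can mean "not present"; bitcode IDs
  // are 0-based, hence the -1 below.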
  ValueMapType::const_iterator I = ValueMap.find(V);
  assert(I != ValueMap.end() && "Value not in slotcalculator!");
  return I->second-1;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ValueEnumerator::dump() const {
  print(dbgs(), ValueMap, "Default");
  dbgs() << '\n';
  print(dbgs(), MetadataMap, "MetaData");
  dbgs() << '\n';
}
#endif

void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
                            const char *Name) const {
  OS << "Map Name: " << Name << "\n";
  OS << "Size: " << Map.size() << "\n";
  for (ValueMapType::const_iterator I = Map.begin(),
         E = Map.end(); I != E; ++I) {
    const Value *V = I->first;
    if (V->hasName())
      OS << "Value: " << V->getName();
    else
      OS << "Value: [null]";
    V->print(OS);
    OS << '\n';

    OS << " Uses(" << std::distance(V->use_begin(),V->use_end()) << "):";
    for (const Use &U : V->uses()) {
      if (&U != &*V->use_begin())
        OS << ",";
      if(U->hasName())
        OS << " " << U->getName();
      else
        OS << " [null]";

    }
    OS <<  "\n\n";
  }
}

void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map,
                            const char *Name) const {
  OS << "Map Name: " << Name << "\n";
  OS << "Size: " << Map.size() << "\n";
  for (auto I = Map.begin(), E = Map.end(); I != E; ++I) {
    const Metadata *MD = I->first;
    OS << "Metadata: slot = " << I->second.ID << "\n";
    OS << "Metadata: function = " << I->second.F << "\n";
    MD->print(OS);
    OS << "\n";
  }
}

/// OptimizeConstants - Reorder constant pool for denser encoding.
void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
  if (CstStart == CstEnd || CstStart+1 == CstEnd) return;

  if (ShouldPreserveUseListOrder)
    // Optimizing constants makes the use-list order difficult to predict.
    // Disable it for now when trying to preserve the order.
    return;

  std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
                   [this](const std::pair<const Value *, unsigned> &LHS,
                          const std::pair<const Value *, unsigned> &RHS) {
    // Sort by plane.
    if (LHS.first->getType() != RHS.first->getType())
      return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType());
    // Then by frequency.
    return LHS.second > RHS.second;
  });

  // Ensure that integer and vector-of-integer constants are at the start of
  // the constant pool.  This is important so that GEP structure indices come
  // before GEP constant exprs.
  std::stable_partition(Values.begin() + CstStart, Values.begin() + CstEnd,
                        isIntOrIntVectorValue);

  // Rebuild the modified portion of ValueMap.
  for (; CstStart != CstEnd; ++CstStart)
    ValueMap[Values[CstStart].first] = CstStart+1;
}

/// EnumerateValueSymbolTable - Insert all of the values in the specified symbol
/// table into the values table.
void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
  for (ValueSymbolTable::const_iterator VI = VST.begin(), VE = VST.end();
       VI != VE; ++VI)
    EnumerateValue(VI->getValue());
}

/// Insert all of the values referenced by named metadata in the specified
/// module.
void ValueEnumerator::EnumerateNamedMetadata(const Module &M) {
  for (const auto &I : M.named_metadata())
    EnumerateNamedMDNode(&I);
}

void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
  for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i)
    EnumerateMetadata(nullptr, MD->getOperand(i));
}

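/// Translate the function a piece of metadata is local to into the tag used
/// by MetadataMap: 0 for module-level metadata, otherwise the function's
/// value ID plus one.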
unsigned ValueEnumerator::getMetadataFunctionID(const Function *F) const {
  return F ? getValueID(F) + 1 : 0;
}

void ValueEnumerator::EnumerateMetadata(const Function *F, const Metadata *MD) {
  EnumerateMetadata(getMetadataFunctionID(F), MD);
}

void ValueEnumerator::EnumerateFunctionLocalMetadata(
    const Function &F, const LocalAsMetadata *Local) {
  EnumerateFunctionLocalMetadata(getMetadataFunctionID(&F), Local);
}

void ValueEnumerator::dropFunctionFromMetadata(
    MetadataMapType::value_type &FirstMD) {
  SmallVector<const MDNode *, 64> Worklist;
  auto push = [&Worklist](MetadataMapType::value_type &MD) {
    auto &Entry = MD.second;

    // Nothing to do if this metadata isn't tagged.
    if (!Entry.F)
      return;

    // Drop the function tag.
    Entry.F = 0;

    // If this has an ID and is an MDNode, then its operands have entries as
    // well.  We need to drop the function from them too.
    if (Entry.ID)
      if (auto *N = dyn_cast<MDNode>(MD.first))
        Worklist.push_back(N);
  };
  push(FirstMD);
  while (!Worklist.empty())
    for (const Metadata *Op : Worklist.pop_back_val()->operands()) {
      if (!Op)
        continue;
      auto MD = MetadataMap.find(Op);
      if (MD != MetadataMap.end())
        push(*MD);
    }
}

void ValueEnumerator::EnumerateMetadata(unsigned F, const Metadata *MD) {
  // It's vital for reader efficiency that uniqued subgraphs are done in
  // post-order; it's expensive when their operands have forward references.
  // If a distinct node is referenced from a uniqued node, it'll be delayed
  // until the uniqued subgraph has been completely traversed.
  SmallVector<const MDNode *, 32> DelayedDistinctNodes;

  // Start by enumerating MD, and then work through its transitive operands in
  // post-order.  This requires a depth-first search.
  SmallVector<std::pair<const MDNode *, MDNode::op_iterator>, 32> Worklist;
  if (const MDNode *N = enumerateMetadataImpl(F, MD))
    Worklist.push_back(std::make_pair(N, N->op_begin()));

  while (!Worklist.empty()) {
    const MDNode *N = Worklist.back().first;

    // Enumerate operands until we hit a new node.  We need to traverse these
    // nodes' operands before visiting the rest of N's operands.
    MDNode::op_iterator I = std::find_if(
        Worklist.back().second, N->op_end(),
        [&](const Metadata *MD) { return enumerateMetadataImpl(F, MD); });
    if (I != N->op_end()) {
      auto *Op = cast<MDNode>(*I);
      Worklist.back().second = ++I;

      // Delay traversing Op if it's a distinct node and N is uniqued.
      if (Op->isDistinct() && !N->isDistinct())
        DelayedDistinctNodes.push_back(Op);
      else
        Worklist.push_back(std::make_pair(Op, Op->op_begin()));
      continue;
    }

    // All the operands have been visited.  Now assign an ID.
    Worklist.pop_back();
    MDs.push_back(N);
    MetadataMap[N].ID = MDs.size();

    // Flush out any delayed distinct nodes; these are all the distinct nodes
    // that are leaves of the last uniqued subgraph.
    if (Worklist.empty() || Worklist.back().first->isDistinct()) {
      for (const MDNode *N : DelayedDistinctNodes)
        Worklist.push_back(std::make_pair(N, N->op_begin()));
      DelayedDistinctNodes.clear();
    }
  }
}

const MDNode *ValueEnumerator::enumerateMetadataImpl(unsigned F,
                                                     const Metadata *MD) {
  if (!MD)
    return nullptr;

  assert(
      (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) &&
      "Invalid metadata kind");

  auto Insertion = MetadataMap.insert(std::make_pair(MD, MDIndex(F)));
  MDIndex &Entry = Insertion.first->second;
  if (!Insertion.second) {
    // Already mapped.  If F doesn't match the function tag, drop it.
    if (Entry.hasDifferentFunction(F))
      dropFunctionFromMetadata(*Insertion.first);
    return nullptr;
  }

  // Don't assign an ID to an MDNode yet; the caller numbers it after all of
  // its operands have been visited (post-order).
  if (auto *N = dyn_cast<MDNode>(MD))
    return N;

  // Save the metadata.
  MDs.push_back(MD);
  Entry.ID = MDs.size();

  // Enumerate the constant, if any.
  if (auto *C = dyn_cast<ConstantAsMetadata>(MD))
    EnumerateValue(C->getValue());

  return nullptr;
}

/// EnumerateFunctionLocalMetadata - Incorporate function-local metadata
/// information reachable from the metadata.
void ValueEnumerator::EnumerateFunctionLocalMetadata(
    unsigned F, const LocalAsMetadata *Local) {
  assert(F && "Expected a function");

  // Check to see if it's already in!
  MDIndex &Index = MetadataMap[Local];
  if (Index.ID) {
    assert(Index.F == F && "Expected the same function");
    return;
  }

  MDs.push_back(Local);
  Index.F = F;
  Index.ID = MDs.size();

  EnumerateValue(Local->getValue());
}

static unsigned getMetadataTypeOrder(const Metadata *MD) {
  // Strings are emitted in bulk and must come first.
  if (isa<MDString>(MD))
    return 0;

  // ConstantAsMetadata doesn't reference anything.  We may as well shuffle it
  // to the front since we can detect it.
  auto *N = dyn_cast<MDNode>(MD);
  if (!N)
    return 1;

  // The reader is fast at resolving forward references to distinct node
  // operands, but slow when uniqued operands are unresolved.
  return N->isDistinct() ? 2 : 3;
}

void ValueEnumerator::organizeMetadata() {
  assert(MetadataMap.size() == MDs.size() &&
         "Metadata map and vector out of sync");

  if (MDs.empty())
    return;

  // Copy out the index information from MetadataMap in order to choose a new
  // order.
  SmallVector<MDIndex, 64> Order;
  Order.reserve(MetadataMap.size());
  for (const Metadata *MD : MDs)
    Order.push_back(MetadataMap.lookup(MD));

  // Partition:
  //   - by function, then
  //   - by isa<MDString>
  // and then sort by the original/current ID.  Since the IDs are guaranteed to
  // be unique, the result of std::sort will be deterministic.  There's no need
  // for std::stable_sort.
  std::sort(Order.begin(), Order.end(), [this](MDIndex LHS, MDIndex RHS) {
    return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
           std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
  });

  // Rebuild MDs, index the metadata ranges for each function in FunctionMDs,
  // and fix up MetadataMap.
  std::vector<const Metadata *> OldMDs = std::move(MDs);
  MDs.reserve(OldMDs.size());
  for (unsigned I = 0, E = Order.size(); I != E && !Order[I].F; ++I) {
    auto *MD = Order[I].get(OldMDs);
    MDs.push_back(MD);
    MetadataMap[MD].ID = I + 1;
    if (isa<MDString>(MD))
      ++NumMDStrings;
  }

  // Return early if there's nothing for the functions.
  if (MDs.size() == Order.size())
    return;

  // Build the function metadata ranges.
  MDRange R;
  FunctionMDs.reserve(OldMDs.size());
  unsigned PrevF = 0;
  for (unsigned I = MDs.size(), E = Order.size(), ID = MDs.size(); I != E;
       ++I) {
    unsigned F = Order[I].F;
    if (!PrevF) {
      PrevF = F;
    } else if (PrevF != F) {
      R.Last = FunctionMDs.size();
      std::swap(R, FunctionMDInfo[PrevF]);
      R.First = FunctionMDs.size();

      ID = MDs.size();
      PrevF = F;
    }

    auto *MD = Order[I].get(OldMDs);
    FunctionMDs.push_back(MD);
    MetadataMap[MD].ID = ++ID;
    if (isa<MDString>(MD))
      ++R.NumStrings;
  }
  R.Last = FunctionMDs.size();
  FunctionMDInfo[PrevF] = R;
}

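/// Append F's metadata range (recorded by organizeMetadata()) to the
/// module-level list; purgeFunction() trims the list back afterwards.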
void ValueEnumerator::incorporateFunctionMetadata(const Function &F) {
  NumModuleMDs = MDs.size();

  auto R = FunctionMDInfo.lookup(getValueID(&F) + 1);
  NumMDStrings = R.NumStrings;
  MDs.insert(MDs.end(), FunctionMDs.begin() + R.First,
             FunctionMDs.begin() + R.Last);
}

void ValueEnumerator::EnumerateValue(const Value *V) {
  assert(!V->getType()->isVoidTy() && "Can't insert void values!");
  assert(!isa<MetadataAsValue>(V) && "EnumerateValue doesn't handle Metadata!");

  // Check to see if it's already in!
  unsigned &ValueID = ValueMap[V];
  if (ValueID) {
    // Increment use count.
    Values[ValueID-1].second++;
    return;
  }

  if (auto *GO = dyn_cast<GlobalObject>(V))
    if (const Comdat *C = GO->getComdat())
      Comdats.insert(C);

  // Enumerate the type of this value.
  EnumerateType(V->getType());

  if (const Constant *C = dyn_cast<Constant>(V)) {
    if (isa<GlobalValue>(C)) {
      // Initializers for globals are handled explicitly elsewhere.
    } else if (C->getNumOperands()) {
      // If a constant has operands, enumerate them.  This makes sure that if a
      // constant has uses (for example an array of const ints), that they are
      // inserted also.

      // We prefer to enumerate them with values before we enumerate the user
      // itself.  This makes it more likely that we can avoid forward references
      // in the reader.  We know that there can be no cycles in the constants
      // graph that don't go through a global variable.
      for (User::const_op_iterator I = C->op_begin(), E = C->op_end();
           I != E; ++I)
        if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
          EnumerateValue(*I);

      // Finally, add the value.  Doing this could make the ValueID reference be
      // dangling, don't reuse it.
      Values.push_back(std::make_pair(V, 1U));
      ValueMap[V] = Values.size();
      return;
    }
  }

  // Add the value.
  Values.push_back(std::make_pair(V, 1U));
  ValueID = Values.size();
}

void ValueEnumerator::EnumerateType(Type *Ty) {
  unsigned *TypeID = &TypeMap[Ty];

  // We've already seen this type.
  if (*TypeID)
    return;

  // If it is a non-anonymous struct, mark the type as being visited so that we
  // don't recursively visit it.  This is safe because we allow forward
  // references of these in the bitcode reader.
  if (StructType *STy = dyn_cast<StructType>(Ty))
    if (!STy->isLiteral())
      *TypeID = ~0U;

  // Enumerate all of the subtypes before we enumerate this type.  This ensures
  // that the type will be enumerated in an order that can be directly built.
  for (Type *SubTy : Ty->subtypes())
    EnumerateType(SubTy);

  // Refresh the TypeID pointer in case the table rehashed.
  TypeID = &TypeMap[Ty];

  // Check to see if we got the pointer another way.  This can happen when
  // enumerating recursive types that hit the base case deeper than they start.
  //
  // If this is actually a struct that we are treating as forward ref'able,
  // then emit the definition now that all of its contents are available.
  if (*TypeID && *TypeID != ~0U)
    return;

  // Add this type now that its contents are all happily enumerated.
  Types.push_back(Ty);

  *TypeID = Types.size();
}

// Enumerate the types for the specified value.  If the value is a constant,
// walk through it, enumerating the types of the constant.
void ValueEnumerator::EnumerateOperandType(const Value *V) {
  EnumerateType(V->getType());

  assert(!isa<MetadataAsValue>(V) && "Unexpected metadata operand");

  const Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return;

  // If this constant is already enumerated, ignore it, we know its type must
  // be enumerated.
  if (ValueMap.count(C))
    return;

  // This constant may have operands, make sure to enumerate the types in
  // them.
  for (const Value *Op : C->operands()) {
    // Don't enumerate basic blocks here, this happens as operands to
    // blockaddress.
    if (isa<BasicBlock>(Op))
      continue;

    EnumerateOperandType(Op);
  }
}

void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
  if (PAL.isEmpty()) return;  // null is always 0.

  // Do a lookup.
  unsigned &Entry = AttributeListMap[PAL];
  if (Entry == 0) {
    // Never saw this before, add it.
    AttributeLists.push_back(PAL);
    Entry = AttributeLists.size();
  }

  // Do lookups for all attribute groups.
  for (unsigned i = PAL.index_begin(), e = PAL.index_end(); i != e; ++i) {
    AttributeSet AS = PAL.getAttributes(i);
    if (!AS.hasAttributes())
      continue;
    IndexAndAttrSet Pair = {i, AS};
    unsigned &Entry = AttributeGroupMap[Pair];
    if (Entry == 0) {
      AttributeGroups.push_back(Pair);
      Entry = AttributeGroups.size();
    }
  }
}

void ValueEnumerator::incorporateFunction(const Function &F) {
  InstructionCount = 0;
  NumModuleValues = Values.size();

  // Add global metadata to the function block.  This doesn't include
  // LocalAsMetadata.
  incorporateFunctionMetadata(F);

  // Add the function arguments to the value table.
  for (const auto &I : F.args())
    EnumerateValue(&I);

  FirstFuncConstantID = Values.size();

  // Add all function-level constants to the value table.
  for (const BasicBlock &BB : F) {
    for (const Instruction &I : BB)
      for (const Use &OI : I.operands()) {
        if ((isa<Constant>(OI) && !isa<GlobalValue>(OI)) || isa<InlineAsm>(OI))
          EnumerateValue(OI);
      }
    BasicBlocks.push_back(&BB);
    ValueMap[&BB] = BasicBlocks.size();
  }

  // Optimize the constant layout.
  OptimizeConstants(FirstFuncConstantID, Values.size());

  // Add the function's parameter attributes so they are available for use in
  // the function's instructions.
  EnumerateAttributes(F.getAttributes());

  FirstInstID = Values.size();

  SmallVector<LocalAsMetadata *, 8> FnLocalMDVector;
  // Add all of the instructions.
  for (const BasicBlock &BB : F) {
    for (const Instruction &I : BB) {
      for (const Use &OI : I.operands()) {
        if (auto *MD = dyn_cast<MetadataAsValue>(&OI))
          if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata()))
            // Enumerate metadata after the instructions they might refer to.
            FnLocalMDVector.push_back(Local);
      }

      if (!I.getType()->isVoidTy())
        EnumerateValue(&I);
    }
  }

  // Add all of the function-local metadata.
  for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i) {
    // At this point, all local values have been incorporated, so we shouldn't
    // have a metadata operand that references a value that hasn't been seen.
    assert(ValueMap.count(FnLocalMDVector[i]->getValue()) &&
           "Missing value for metadata operand");
    EnumerateFunctionLocalMetadata(F, FnLocalMDVector[i]);
  }
}

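/// Drop everything added by incorporateFunction(), returning the enumerator
/// to its module-level state.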
void ValueEnumerator::purgeFunction() {
  // Remove purged values from the ValueMap.
  for (unsigned i = NumModuleValues, e = Values.size(); i != e; ++i)
    ValueMap.erase(Values[i].first);
  for (unsigned i = NumModuleMDs, e = MDs.size(); i != e; ++i)
    MetadataMap.erase(MDs[i]);
  for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
    ValueMap.erase(BasicBlocks[i]);

  Values.resize(NumModuleValues);
  MDs.resize(NumModuleMDs);
  BasicBlocks.clear();
  NumMDStrings = 0;
}

static void IncorporateFunctionInfoGlobalBBIDs(const Function *F,
                                 DenseMap<const BasicBlock*, unsigned> &IDMap) {
  unsigned Counter = 0;
  for (const BasicBlock &BB : *F)
    IDMap[&BB] = ++Counter;
}

/// getGlobalBasicBlockID - This returns the function-specific ID for the
/// specified basic block.  This is relatively expensive information, so it
/// should only be used by rare constructs such as address-of-label.
unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
  unsigned &Idx = GlobalBasicBlockIDs[BB];
  if (Idx != 0)
    return Idx-1;

  IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs);
  return getGlobalBasicBlockID(BB);
}

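/// Return the number of bits needed to represent any value up to and
/// including getTypes().size().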
uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const {
  return Log2_32_Ceil(getTypes().size() + 1);
}