LEFT | RIGHT |
1 /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ | 1 /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */ |
2 /* | 2 /* |
3 * Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC) | 3 * Copyright (c) 2011 Centre Tecnologic de Telecomunicacions de Catalunya (CTTC) |
4 * | 4 * |
5 * This program is free software; you can redistribute it and/or modify | 5 * This program is free software; you can redistribute it and/or modify |
6 * it under the terms of the GNU General Public License version 2 as | 6 * it under the terms of the GNU General Public License version 2 as |
7 * published by the Free Software Foundation; | 7 * published by the Free Software Foundation; |
8 * | 8 * |
9 * This program is distributed in the hope that it will be useful, | 9 * This program is distributed in the hope that it will be useful, |
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of |
(...skipping 302 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
313 } | 313 } |
314 | 314 |
315 return; | 315 return; |
316 } | 316 } |
317 | 317 |
318 void | 318 void |
319 FdBetFfMacScheduler::DoSchedDlPagingBufferReq (const struct FfMacSchedSapProvide
r::SchedDlPagingBufferReqParameters& params) | 319 FdBetFfMacScheduler::DoSchedDlPagingBufferReq (const struct FfMacSchedSapProvide
r::SchedDlPagingBufferReqParameters& params) |
320 { | 320 { |
321 NS_LOG_FUNCTION (this); | 321 NS_LOG_FUNCTION (this); |
322 NS_FATAL_ERROR ("method not implemented"); | 322 NS_FATAL_ERROR ("method not implemented"); |
323 NS_UNUSED(params); | 323 NS_UNUSED (params); |
324 return; | 324 return; |
325 } | 325 } |
326 | 326 |
327 void | 327 void |
328 FdBetFfMacScheduler::DoSchedDlMacBufferReq (const struct FfMacSchedSapProvider::
SchedDlMacBufferReqParameters& params) | 328 FdBetFfMacScheduler::DoSchedDlMacBufferReq (const struct FfMacSchedSapProvider::
SchedDlMacBufferReqParameters& params) |
329 { | 329 { |
330 NS_LOG_FUNCTION (this); | 330 NS_LOG_FUNCTION (this); |
331 NS_FATAL_ERROR ("method not implemented"); | 331 NS_FATAL_ERROR ("method not implemented"); |
332 NS_UNUSED(params); | 332 NS_UNUSED (params); |
333 return; | 333 return; |
334 } | 334 } |
335 | 335 |
336 int | 336 int |
337 FdBetFfMacScheduler::GetRbgSize (int dlbandwidth) | 337 FdBetFfMacScheduler::GetRbgSize (int dlbandwidth) |
338 { | 338 { |
339 for (int i = 0; i < 4; i++) | 339 for (int i = 0; i < 4; i++) |
340 { | 340 { |
341 if (dlbandwidth < FdBetType0AllocationRbg[i]) | 341 if (dlbandwidth < FdBetType0AllocationRbg[i]) |
342 { | 342 { |
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
495 int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize; | 495 int rbgNum = m_cschedCellConfig.m_dlBandwidth / rbgSize; |
496 std::map <uint16_t, std::vector <uint16_t> > allocationMap; // RBs map per RNT
I | 496 std::map <uint16_t, std::vector <uint16_t> > allocationMap; // RBs map per RNT
I |
497 std::vector <bool> rbgMap; // global RBGs map | 497 std::vector <bool> rbgMap; // global RBGs map |
498 uint16_t rbgAllocatedNum = 0; | 498 uint16_t rbgAllocatedNum = 0; |
499 std::set <uint16_t> rntiAllocated; | 499 std::set <uint16_t> rntiAllocated; |
500 rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false); | 500 rbgMap.resize (m_cschedCellConfig.m_dlBandwidth / rbgSize, false); |
501 FfMacSchedSapUser::SchedDlConfigIndParameters ret; | 501 FfMacSchedSapUser::SchedDlConfigIndParameters ret; |
502 | 502 |
503 | 503 |
504 // update UL HARQ proc id | 504 // update UL HARQ proc id |
505 std::map <uint16_t, uint8_t>::iterator itProcId; | 505 for (std::map <uint16_t, uint8_t>::iterator itProcId = m_ulHarqCurrentProcessI
d.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++) |
506 for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrent
ProcessId.end (); itProcId++) | |
507 { | 506 { |
508 (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM; | 507 (*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM; |
509 } | 508 } |
510 | 509 |
511 // RACH Allocation | 510 // RACH Allocation |
512 m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0); | 511 m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0); |
513 uint16_t rbStart = 0; | 512 uint8_t rbStart = 0; |
514 std::vector <struct RachListElement_s>::iterator itRach; | 513 std::vector <struct RachListElement_s>::iterator itRach; |
515 for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++) | 514 for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++) |
516 { | 515 { |
517 NS_ASSERT_MSG (m_amc->GetTbSizeFromMcs (m_ulGrantMcs, m_cschedCellConfig.m
_ulBandwidth) > (*itRach).m_estimatedSize, " Default UL Grant MCS does not allow
to send RACH messages"); | 516 NS_ASSERT_MSG (m_amc->GetUlTbSizeFromMcs (m_ulGrantMcs, m_cschedCellConfig
.m_ulBandwidth) > (*itRach).m_estimatedSize, " Default UL Grant MCS does not all
ow to send RACH messages"); |
518 BuildRarListElement_s newRar; | 517 BuildRarListElement_s newRar; |
519 newRar.m_rnti = (*itRach).m_rnti; | 518 newRar.m_rnti = (*itRach).m_rnti; |
520 // DL-RACH Allocation | 519 // DL-RACH Allocation |
521 // Ideal: no needs of configuring m_dci | 520 // Ideal: no needs of configuring m_dci |
522 // UL-RACH Allocation | 521 // UL-RACH Allocation |
523 newRar.m_grant.m_rnti = newRar.m_rnti; | 522 newRar.m_grant.m_rnti = newRar.m_rnti; |
524 newRar.m_grant.m_mcs = m_ulGrantMcs; | 523 newRar.m_grant.m_mcs = m_ulGrantMcs; |
525 uint16_t rbLen = 1; | 524 uint8_t rbLen = 1; |
526 uint16_t tbSizeBits = 0; | 525 uint16_t tbSizeBits = 0; |
527 // find lowest TB size that fits UL grant estimated size | 526 // find lowest TB size that fits UL grant estimated size |
528 while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < m_cs
chedCellConfig.m_ulBandwidth)) | 527 while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < m_cs
chedCellConfig.m_ulBandwidth)) |
529 { | 528 { |
530 rbLen++; | 529 rbLen++; |
531 tbSizeBits = static_cast<uint16_t>(m_amc->GetTbSizeFromMcs (m_ulGrantM
cs, rbLen)); | 530 tbSizeBits = m_amc->GetUlTbSizeFromMcs (m_ulGrantMcs, rbLen); |
532 } | 531 } |
533 if (tbSizeBits < (*itRach).m_estimatedSize) | 532 if (tbSizeBits < (*itRach).m_estimatedSize) |
534 { | 533 { |
535 // no more allocation space: finish allocation | 534 // no more allocation space: finish allocation |
536 break; | 535 break; |
537 } | 536 } |
538 newRar.m_grant.m_rbStart = static_cast<uint8_t>(rbStart); | 537 newRar.m_grant.m_rbStart = rbStart; |
539 newRar.m_grant.m_rbLen = static_cast<uint8_t>(rbLen); | 538 newRar.m_grant.m_rbLen = rbLen; |
540 newRar.m_grant.m_tbSize = tbSizeBits / 8; | 539 newRar.m_grant.m_tbSize = tbSizeBits / 8; |
541 newRar.m_grant.m_hopping = false; | 540 newRar.m_grant.m_hopping = false; |
542 newRar.m_grant.m_tpc = 0; | 541 newRar.m_grant.m_tpc = 0; |
543 newRar.m_grant.m_cqiRequest = false; | 542 newRar.m_grant.m_cqiRequest = false; |
544 newRar.m_grant.m_ulDelay = false; | 543 newRar.m_grant.m_ulDelay = false; |
545 NS_LOG_INFO (this << " UL grant allocated to RNTI " << (*itRach).m_rnti <<
" rbStart " << rbStart << " rbLen " << rbLen << " MCS " << m_ulGrantMcs << " tb
Size " << newRar.m_grant.m_tbSize); | 544 NS_LOG_INFO (this << " UL grant allocated to RNTI " << (*itRach).m_rnti <<
" rbStart " << rbStart << " rbLen " << rbLen << " MCS " << m_ulGrantMcs << " tb
Size " << newRar.m_grant.m_tbSize); |
546 for (uint16_t i = rbStart; i < rbStart + rbLen; i++) | 545 for (uint16_t i = rbStart; i < rbStart + rbLen; i++) |
547 { | 546 { |
548 m_rachAllocationMap.at (i) = (*itRach).m_rnti; | 547 m_rachAllocationMap.at (i) = (*itRach).m_rnti; |
549 } | 548 } |
550 | 549 |
551 if (m_harqOn == true) | 550 if (m_harqOn == true) |
552 { | 551 { |
553 // generate UL-DCI for HARQ retransmissions | 552 // generate UL-DCI for HARQ retransmissions |
554 UlDciListElement_s uldci; | 553 UlDciListElement_s uldci; |
555 uldci.m_rnti = newRar.m_rnti; | 554 uldci.m_rnti = newRar.m_rnti; |
556 uldci.m_rbLen = static_cast<uint8_t>(rbLen); | 555 uldci.m_rbLen = rbLen; |
557 uldci.m_rbStart = static_cast<uint8_t>(rbStart); | 556 uldci.m_rbStart = rbStart; |
558 uldci.m_mcs = m_ulGrantMcs; | 557 uldci.m_mcs = m_ulGrantMcs; |
559 uldci.m_tbSize = tbSizeBits / 8; | 558 uldci.m_tbSize = tbSizeBits / 8; |
560 uldci.m_ndi = 1; | 559 uldci.m_ndi = 1; |
561 uldci.m_cceIndex = 0; | 560 uldci.m_cceIndex = 0; |
562 uldci.m_aggrLevel = 1; | 561 uldci.m_aggrLevel = 1; |
563 uldci.m_ueTxAntennaSelection = 3; // antenna selection OFF | 562 uldci.m_ueTxAntennaSelection = 3; // antenna selection OFF |
564 uldci.m_hopping = false; | 563 uldci.m_hopping = false; |
565 uldci.m_n2Dmrs = 0; | 564 uldci.m_n2Dmrs = 0; |
566 uldci.m_tpc = 0; // no power control | 565 uldci.m_tpc = 0; // no power control |
567 uldci.m_cqiRequest = false; // only period CQI at this stage | 566 uldci.m_cqiRequest = false; // only period CQI at this stage |
568 uldci.m_ulIndex = 0; // TDD parameter | 567 uldci.m_ulIndex = 0; // TDD parameter |
569 uldci.m_dai = 1; // TDD parameter | 568 uldci.m_dai = 1; // TDD parameter |
570 uldci.m_freqHopping = 0; | 569 uldci.m_freqHopping = 0; |
571 uldci.m_pdcchPowerOffset = 0; // not used | 570 uldci.m_pdcchPowerOffset = 0; // not used |
572 | 571 |
573 uint8_t harqId = 0; | 572 uint8_t harqId = 0; |
574 std::map <uint16_t, uint8_t>::iterator itProcId1; | 573 std::map <uint16_t, uint8_t>::iterator itProcId; |
575 itProcId1 = m_ulHarqCurrentProcessId.find (uldci.m_rnti); | 574 itProcId = m_ulHarqCurrentProcessId.find (uldci.m_rnti); |
576 if (itProcId1 == m_ulHarqCurrentProcessId.end ()) | 575 if (itProcId == m_ulHarqCurrentProcessId.end ()) |
577 { | 576 { |
578 NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_r
nti); | 577 NS_FATAL_ERROR ("No info find in HARQ buffer for UE " << uldci.m_r
nti); |
579 } | 578 } |
580 harqId = (*itProcId1).second; | 579 harqId = (*itProcId).second; |
581 std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ul
HarqProcessesDciBuffer.find (uldci.m_rnti); | 580 std::map <uint16_t, UlHarqProcessesDciBuffer_t>::iterator itDci = m_ul
HarqProcessesDciBuffer.find (uldci.m_rnti); |
582 if (itDci == m_ulHarqProcessesDciBuffer.end ()) | 581 if (itDci == m_ulHarqProcessesDciBuffer.end ()) |
583 { | 582 { |
584 NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer f
or RNTI " << uldci.m_rnti); | 583 NS_FATAL_ERROR ("Unable to find RNTI entry in UL DCI HARQ buffer f
or RNTI " << uldci.m_rnti); |
585 } | 584 } |
586 (*itDci).second.at (harqId) = uldci; | 585 (*itDci).second.at (harqId) = uldci; |
587 } | 586 } |
588 | 587 |
589 rbStart = rbStart + rbLen; | 588 rbStart = rbStart + rbLen; |
590 ret.m_buildRarList.push_back (newRar); | 589 ret.m_buildRarList.push_back (newRar); |
(...skipping 26 matching lines...) Expand all Loading... |
617 } | 616 } |
618 std::vector <struct DlInfoListElement_s> dlInfoListUntxed; | 617 std::vector <struct DlInfoListElement_s> dlInfoListUntxed; |
619 for (uint16_t i = 0; i < m_dlInfoListBuffered.size (); i++) | 618 for (uint16_t i = 0; i < m_dlInfoListBuffered.size (); i++) |
620 { | 619 { |
621 std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuf
fered.at (i).m_rnti); | 620 std::set <uint16_t>::iterator itRnti = rntiAllocated.find (m_dlInfoListBuf
fered.at (i).m_rnti); |
622 if (itRnti != rntiAllocated.end ()) | 621 if (itRnti != rntiAllocated.end ()) |
623 { | 622 { |
624 // RNTI already allocated for retx | 623 // RNTI already allocated for retx |
625 continue; | 624 continue; |
626 } | 625 } |
627 uint8_t nLayers = static_cast<uint8_t>(m_dlInfoListBuffered.at (i).m_harqS
tatus.size ()); | 626 uint8_t nLayers = static_cast<uint8_t> (m_dlInfoListBuffered.at (i).m_harq
Status.size ()); |
628 std::vector <bool> retx; | 627 std::vector <bool> retx; |
629 NS_LOG_INFO (this << " Processing DLHARQ feedback"); | 628 NS_LOG_INFO (this << " Processing DLHARQ feedback"); |
630 if (nLayers == 1) | 629 if (nLayers == 1) |
631 { | 630 { |
632 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlI
nfoListElement_s::NACK); | 631 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlI
nfoListElement_s::NACK); |
633 retx.push_back (false); | 632 retx.push_back (false); |
634 } | 633 } |
635 else | 634 else |
636 { | 635 { |
637 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlI
nfoListElement_s::NACK); | 636 retx.push_back (m_dlInfoListBuffered.at (i).m_harqStatus.at (0) == DlI
nfoListElement_s::NACK); |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
714 NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned"); | 713 NS_LOG_INFO ("RBG " << dciRbg.at (j) << " assigned"); |
715 rbgAllocatedNum++; | 714 rbgAllocatedNum++; |
716 } | 715 } |
717 | 716 |
718 NS_LOG_INFO (this << " Send retx in the same RBGs"); | 717 NS_LOG_INFO (this << " Send retx in the same RBGs"); |
719 } | 718 } |
720 else | 719 else |
721 { | 720 { |
722 // find RBGs for sending HARQ retx | 721 // find RBGs for sending HARQ retx |
723 uint8_t j = 0; | 722 uint8_t j = 0; |
724 uint8_t rbgId = static_cast<uint8_t>((dciRbg.at (dciRbg.size () -
1) + 1) % rbgNum); | 723 uint8_t rbgId = static_cast<uint8_t> ((dciRbg.at (dciRbg.size () -
1) + 1) % rbgNum); |
725 uint8_t startRbg = static_cast<uint8_t>(dciRbg.at (dciRbg.size ()
- 1)); | 724 uint8_t startRbg = static_cast<uint8_t> (dciRbg.at (dciRbg.size ()
- 1)); |
726 std::vector <bool> rbgMapCopy = rbgMap; | 725 std::vector <bool> rbgMapCopy = rbgMap; |
727 while ((j < dciRbg.size ())&&(startRbg != rbgId)) | 726 while ((j < dciRbg.size ())&&(startRbg != rbgId)) |
728 { | 727 { |
729 if (rbgMapCopy.at (rbgId) == false) | 728 if (rbgMapCopy.at (rbgId) == false) |
730 { | 729 { |
731 rbgMapCopy.at (rbgId) = true; | 730 rbgMapCopy.at (rbgId) = true; |
732 dciRbg.at (j) = rbgId; | 731 dciRbg.at (j) = rbgId; |
733 j++; | 732 j++; |
734 } | 733 } |
735 rbgId = (rbgId + 1) % rbgNum; | 734 rbgId = (rbgId + 1) % rbgNum; |
(...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
932 if (metric > metricMax) | 931 if (metric > metricMax) |
933 { | 932 { |
934 metricMax = metric; | 933 metricMax = metric; |
935 itMax = it; | 934 itMax = it; |
936 } | 935 } |
937 rbgPerRntiLog.insert (std::pair<uint16_t, int> ((*it).first, 1)); | 936 rbgPerRntiLog.insert (std::pair<uint16_t, int> ((*it).first, 1)); |
938 } | 937 } |
939 | 938 |
940 | 939 |
941 // The scheduler tries the best to achieve the equal throughput among all
UEs | 940 // The scheduler tries the best to achieve the equal throughput among all
UEs |
942 int i = 0; | 941 uint16_t i = 0; |
943 do· | 942 do· |
944 { | 943 { |
945 NS_LOG_INFO (this << " ALLOCATION for RBG " << i << " of " << rbgNum); | 944 NS_LOG_INFO (this << " ALLOCATION for RBG " << i << " of " << rbgNum); |
946 if (rbgMap.at (i) == false) | 945 if (rbgMap.at (i) == false) |
947 { | 946 { |
948 // allocate one RBG to current UE | 947 // allocate one RBG to current UE |
949 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; | 948 std::map <uint16_t, std::vector <uint16_t> >::iterator itMap; |
950 std::vector <uint16_t> tempMap; | 949 std::vector <uint16_t> tempMap; |
951 itMap = allocationMap.find ((*itMax).first); | 950 itMap = allocationMap.find ((*itMax).first); |
952 if (itMap == allocationMap.end ()) | 951 if (itMap == allocationMap.end ()) |
953 { | 952 { |
954 tempMap.push_back (static_cast<uint16_t>(i)); | 953 tempMap.push_back (i); |
955 allocationMap.insert (std::pair <uint16_t, std::vector <uint16
_t> > ((*itMax).first, tempMap)); | 954 allocationMap.insert (std::pair <uint16_t, std::vector <uint16
_t> > ((*itMax).first, tempMap)); |
956 } | 955 } |
957 else | 956 else |
958 { | 957 { |
959 (*itMap).second.push_back (static_cast<uint16_t>(i)); | 958 (*itMap).second.push_back (i); |
960 } | 959 } |
961 | 960 |
962 // caculate expected throughput for current UE | 961 // caculate expected throughput for current UE |
963 std::map <uint16_t,uint8_t>::iterator itCqi; | 962 std::map <uint16_t,uint8_t>::iterator itCqi; |
964 itCqi = m_p10CqiRxed.find ((*itMax).first); | 963 itCqi = m_p10CqiRxed.find ((*itMax).first); |
965 std::map <uint16_t,uint8_t>::iterator itTxMode; | 964 std::map <uint16_t,uint8_t>::iterator itTxMode; |
966 itTxMode = m_uesTxMode.find ((*itMax).first); | 965 itTxMode = m_uesTxMode.find ((*itMax).first); |
967 if (itTxMode == m_uesTxMode.end ()) | 966 if (itTxMode == m_uesTxMode.end ()) |
968 { | 967 { |
969 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*itMa
x).first); | 968 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*itMa
x).first); |
970 } | 969 } |
971 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode)
.second); | 970 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode)
.second); |
972 std::vector <uint8_t> mcs; | 971 std::vector <uint8_t> mcs; |
973 for (uint8_t j = 0; j < nLayer; j++)· | 972 for (uint8_t j = 0; j < nLayer; j++)· |
974 { | 973 { |
975 if (itCqi == m_p10CqiRxed.end ()) | 974 if (itCqi == m_p10CqiRxed.end ()) |
976 { | 975 { |
977 mcs.push_back (0); // no info on this user -> lowest MCS | 976 mcs.push_back (0); // no info on this user -> lowest MCS |
978 } | 977 } |
979 else | 978 else |
980 { | 979 { |
981 mcs.push_back (static_cast<uint8_t>(m_amc->GetMcsFromCqi (
(*itCqi).second))); | 980 mcs.push_back (m_amc->GetMcsFromCqi ((*itCqi).second)); |
982 } | 981 } |
983 } | 982 } |
984 | 983 |
985 std::map <uint16_t,int>::iterator itRbgPerRntiLog; | 984 std::map <uint16_t,int>::iterator itRbgPerRntiLog; |
986 itRbgPerRntiLog = rbgPerRntiLog.find ((*itMax).first); | 985 itRbgPerRntiLog = rbgPerRntiLog.find ((*itMax).first); |
987 std::map <uint16_t, fdbetsFlowPerf_t>::iterator itPastAveThr; | 986 std::map <uint16_t, fdbetsFlowPerf_t>::iterator itPastAveThr; |
988 itPastAveThr = m_flowStatsDl.find ((*itMax).first); | 987 itPastAveThr = m_flowStatsDl.find ((*itMax).first); |
989 uint32_t bytesTxed = 0; | 988 uint32_t bytesTxed = 0; |
990 for (uint8_t j = 0; j < nLayer; j++) | 989 for (uint8_t j = 0; j < nLayer; j++) |
991 { | 990 { |
992 int tbSize = (m_amc->GetTbSizeFromMcs (mcs.at (0), (*itRbgPerR
ntiLog).second * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7
.2.1-1 of 36.213) | 991 int tbSize = (m_amc->GetDlTbSizeFromMcs (mcs.at (0), (*itRbgPe
rRntiLog).second * rbgSize) / 8); // (size of TB in bytes according to table 7.1
.7.2.1-1 of 36.213) |
993 bytesTxed += tbSize; | 992 bytesTxed += tbSize; |
994 } | 993 } |
995 double expectedAveThr = ((1.0 - (1.0 / m_timeWindow)) * (*itPastAv
eThr).second.lastAveragedThroughput) + ((1.0 / m_timeWindow) * (double)(bytesTxe
d / 0.001)); | 994 double expectedAveThr = ((1.0 - (1.0 / m_timeWindow)) * (*itPastAv
eThr).second.lastAveragedThroughput) + ((1.0 / m_timeWindow) * (double)(bytesTxe
d / 0.001)); |
996 | 995 |
997 int rbgPerRnti = (*itRbgPerRntiLog).second; | 996 int rbgPerRnti = (*itRbgPerRntiLog).second; |
998 rbgPerRnti++; | 997 rbgPerRnti++; |
999 rbgPerRntiLog[(*itMax).first] = rbgPerRnti; | 998 rbgPerRntiLog[(*itMax).first] = rbgPerRnti; |
1000 estAveThr[(*itMax).first] = expectedAveThr; | 999 estAveThr[(*itMax).first] = expectedAveThr; |
1001 | 1000 |
1002 // find new UE with largest priority metric | 1001 // find new UE with largest priority metric |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1035 while (itMap != allocationMap.end ()) | 1034 while (itMap != allocationMap.end ()) |
1036 { | 1035 { |
1037 // create new BuildDataListElement_s for this LC | 1036 // create new BuildDataListElement_s for this LC |
1038 BuildDataListElement_s newEl; | 1037 BuildDataListElement_s newEl; |
1039 newEl.m_rnti = (*itMap).first; | 1038 newEl.m_rnti = (*itMap).first; |
1040 // create the DlDciListElement_s | 1039 // create the DlDciListElement_s |
1041 DlDciListElement_s newDci; | 1040 DlDciListElement_s newDci; |
1042 newDci.m_rnti = (*itMap).first; | 1041 newDci.m_rnti = (*itMap).first; |
1043 newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first); | 1042 newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first); |
1044 | 1043 |
1045 uint16_t lcActives = static_cast<uint16_t>(LcActivePerFlow ((*itMap).first
)); | 1044 uint16_t lcActives = static_cast<uint16_t> (LcActivePerFlow ((*itMap).firs
t)); |
1046 NS_LOG_INFO (this << "Allocate user " << newEl.m_rnti << " rbg " << lcActi
ves); | 1045 NS_LOG_INFO (this << "Allocate user " << newEl.m_rnti << " rbg " << lcActi
ves); |
1047 if (lcActives == 0) | 1046 if (lcActives == 0) |
1048 { | 1047 { |
1049 // Set to max value, to avoid divide by 0 below | 1048 // Set to max value, to avoid divide by 0 below |
1050 lcActives = (uint16_t)65535; // UINT16_MAX; | 1049 lcActives = (uint16_t)65535; // UINT16_MAX; |
1051 } | 1050 } |
1052 uint16_t RgbPerRnti = static_cast<uint16_t>((*itMap).second.size ()); | 1051 uint16_t RgbPerRnti = static_cast<uint16_t> ((*itMap).second.size ()); |
1053 std::map <uint16_t,uint8_t>::iterator itCqi; | 1052 std::map <uint16_t,uint8_t>::iterator itCqi; |
1054 itCqi = m_p10CqiRxed.find ((*itMap).first); | 1053 itCqi = m_p10CqiRxed.find ((*itMap).first); |
1055 std::map <uint16_t,uint8_t>::iterator itTxMode; | 1054 std::map <uint16_t,uint8_t>::iterator itTxMode; |
1056 itTxMode = m_uesTxMode.find ((*itMap).first); | 1055 itTxMode = m_uesTxMode.find ((*itMap).first); |
1057 if (itTxMode == m_uesTxMode.end ()) | 1056 if (itTxMode == m_uesTxMode.end ()) |
1058 { | 1057 { |
1059 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*itMap).first
); | 1058 NS_FATAL_ERROR ("No Transmission Mode info on user " << (*itMap).first
); |
1060 } | 1059 } |
1061 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second)
; | 1060 int nLayer = TransmissionModesLayers::TxMode2LayerNum ((*itTxMode).second)
; |
1062 | 1061 |
1063 uint32_t bytesTxed = 0; | 1062 uint32_t bytesTxed = 0; |
1064 for (uint8_t j = 0; j < nLayer; j++) | 1063 for (uint8_t j = 0; j < nLayer; j++) |
1065 { | 1064 { |
1066 if (itCqi == m_p10CqiRxed.end ()) | 1065 if (itCqi == m_p10CqiRxed.end ()) |
1067 { | 1066 { |
1068 newDci.m_mcs.push_back (0); // no info on this user -> lowest MCS | 1067 newDci.m_mcs.push_back (0); // no info on this user -> lowest MCS |
1069 } | 1068 } |
1070 else | 1069 else |
1071 { | 1070 { |
1072 newDci.m_mcs.push_back ( static_cast<uint8_t>(m_amc->GetMcsFromCqi
((*itCqi).second) )); | 1071 newDci.m_mcs.push_back ( m_amc->GetMcsFromCqi ((*itCqi).second) ); |
1073 } | 1072 } |
1074 | 1073 |
1075 int tbSize = (m_amc->GetTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRnti
* rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.2
13) | 1074 int tbSize = (m_amc->GetDlTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRn
ti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36
.213) |
1076 newDci.m_tbsSize.push_back (static_cast<uint16_t>(tbSize)); | 1075 newDci.m_tbsSize.push_back (static_cast<uint16_t> (tbSize)); |
1077 bytesTxed += tbSize; | 1076 bytesTxed += tbSize; |
1078 } | 1077 } |
1079 | 1078 |
1080 newDci.m_resAlloc = 0; // only allocation type 0 at this stage | 1079 newDci.m_resAlloc = 0; // only allocation type 0 at this stage |
1081 newDci.m_rbBitmap = 0; // TBD (32 bit bitmap see 7.1.6 of 36.213) | 1080 newDci.m_rbBitmap = 0; // TBD (32 bit bitmap see 7.1.6 of 36.213) |
1082 uint32_t rbgMask = 0; | 1081 uint32_t rbgMask = 0; |
1083 for (uint16_t k = 0; k < (*itMap).second.size (); k++) | 1082 for (uint16_t k = 0; k < (*itMap).second.size (); k++) |
1084 { | 1083 { |
1085 rbgMask = rbgMask + (0x1 << (*itMap).second.at (k)); | 1084 rbgMask = rbgMask + (0x1 << (*itMap).second.at (k)); |
1086 NS_LOG_INFO (this << " Allocated RBG " << (*itMap).second.at (k)); | 1085 NS_LOG_INFO (this << " Allocated RBG " << (*itMap).second.at (k)); |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1148 { | 1147 { |
1149 NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t
)newEl.m_rnti); | 1148 NS_FATAL_ERROR ("Unable to find HARQ timer for RNTI " << (uint16_t
)newEl.m_rnti); |
1150 } | 1149 } |
1151 (*itHarqTimer).second.at (newDci.m_harqProcess) = 0; | 1150 (*itHarqTimer).second.at (newDci.m_harqProcess) = 0; |
1152 } | 1151 } |
1153 | 1152 |
1154 // ...more parameters -> ingored in this version | 1153 // ...more parameters -> ingored in this version |
1155 | 1154 |
1156 ret.m_buildDataList.push_back (newEl); | 1155 ret.m_buildDataList.push_back (newEl); |
1157 // update UE stats | 1156 // update UE stats |
1158 std::map <uint16_t, fdbetsFlowPerf_t>::iterator it1; | 1157 std::map <uint16_t, fdbetsFlowPerf_t>::iterator fit; |
1159 it1 = m_flowStatsDl.find ((*itMap).first); | 1158 fit = m_flowStatsDl.find ((*itMap).first); |
1160 if (it1 != m_flowStatsDl.end ()) | 1159 if (fit != m_flowStatsDl.end ()) |
1161 { | 1160 { |
1162 (*it1).second.lastTtiBytesTrasmitted = bytesTxed; | 1161 (*fit).second.lastTtiBytesTrasmitted = bytesTxed; |
1163 NS_LOG_INFO (this << " UE total bytes txed " << (*it1).second.lastTtiB
ytesTrasmitted); | 1162 NS_LOG_INFO (this << " UE total bytes txed " << (*fit).second.lastTtiB
ytesTrasmitted); |
1164 } | 1163 } |
1165 else | 1164 else |
1166 { | 1165 { |
1167 NS_FATAL_ERROR (this << " No Stats for this allocated UE"); | 1166 NS_FATAL_ERROR (this << " No Stats for this allocated UE"); |
1168 } | 1167 } |
1169 | 1168 |
1170 itMap++; | 1169 itMap++; |
1171 } // end while allocation | 1170 } // end while allocation |
1172 ret.m_nrOfPdcchOfdmSymbols = 1; /// \todo check correct value according the
DCIs txed | 1171 ret.m_nrOfPdcchOfdmSymbols = 1; /// \todo check correct value according the
DCIs txed |
1173 | 1172 |
(...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1416 { | 1415 { |
1417 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> >
(params.m_sfnSf, rbgAllocationMap)); | 1416 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> >
(params.m_sfnSf, rbgAllocationMap)); |
1418 m_schedSapUser->SchedUlConfigInd (ret); | 1417 m_schedSapUser->SchedUlConfigInd (ret); |
1419 } | 1418 } |
1420 | 1419 |
1421 return; // no flows to be scheduled | 1420 return; // no flows to be scheduled |
1422 } | 1421 } |
1423 | 1422 |
1424 | 1423 |
1425 // Divide the remaining resources equally among the active users starting from
the subsequent one served last scheduling trigger | 1424 // Divide the remaining resources equally among the active users starting from
the subsequent one served last scheduling trigger |
1426 uint16_t rbPerFlow = static_cast<uint16_t>((m_cschedCellConfig.m_ulBandwidth)
/ (nflows + rntiAllocated.size ())); | 1425 uint16_t rbPerFlow = static_cast<uint16_t> ((m_cschedCellConfig.m_ulBandwidth)
/ (nflows + rntiAllocated.size ())); |
1427 if (rbPerFlow < 3) | 1426 if (rbPerFlow < 3) |
1428 { | 1427 { |
1429 rbPerFlow = 3; // at least 3 rbg per flow (till available resource) to en
sure TxOpportunity >= 7 bytes | 1428 rbPerFlow = 3; // at least 3 rbg per flow (till available resource) to en
sure TxOpportunity >= 7 bytes |
1430 } | 1429 } |
1431 int rbAllocated = 0; | 1430 uint16_t rbAllocated = 0; |
1432 | 1431 |
1433 std::map <uint16_t, fdbetsFlowPerf_t>::iterator itStats; | 1432 std::map <uint16_t, fdbetsFlowPerf_t>::iterator itStats; |
1434 if (m_nextRntiUl != 0) | 1433 if (m_nextRntiUl != 0) |
1435 { | 1434 { |
1436 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) | 1435 for (it = m_ceBsrRxed.begin (); it != m_ceBsrRxed.end (); it++) |
1437 { | 1436 { |
1438 if ((*it).first == m_nextRntiUl) | 1437 if ((*it).first == m_nextRntiUl) |
1439 { | 1438 { |
1440 break; | 1439 break; |
1441 } | 1440 } |
(...skipping 19 matching lines...) Expand all Loading... |
1461 if (it == m_ceBsrRxed.end ()) | 1460 if (it == m_ceBsrRxed.end ()) |
1462 { | 1461 { |
1463 // restart from the first | 1462 // restart from the first |
1464 it = m_ceBsrRxed.begin (); | 1463 it = m_ceBsrRxed.begin (); |
1465 } | 1464 } |
1466 continue; | 1465 continue; |
1467 } | 1466 } |
1468 if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth) | 1467 if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth) |
1469 { | 1468 { |
1470 // limit to physical resources last resource assignment | 1469 // limit to physical resources last resource assignment |
1471 rbPerFlow = static_cast<uint16_t>(m_cschedCellConfig.m_ulBandwidth - r
bAllocated); | 1470 rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated; |
1472 // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes | 1471 // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes |
1473 if (rbPerFlow < 3) | 1472 if (rbPerFlow < 3) |
1474 { | 1473 { |
1475 // terminate allocation | 1474 // terminate allocation |
1476 rbPerFlow = 0; | 1475 rbPerFlow = 0; |
1477 } | 1476 } |
1478 } | 1477 } |
1479 | 1478 |
1480 UlDciListElement_s uldci; | 1479 UlDciListElement_s uldci; |
1481 uldci.m_rnti = (*it).first; | 1480 uldci.m_rnti = (*it).first; |
1482 uldci.m_rbLen = static_cast<uint8_t>(rbPerFlow); | 1481 uldci.m_rbLen = static_cast<uint8_t> (rbPerFlow); |
1483 uldci.m_rbStart = 0; | 1482 uldci.m_rbStart = 0; |
1484 bool allocated = false; | 1483 bool allocated = false; |
1485 NS_LOG_INFO (this << " RB Allocated " << rbAllocated << " rbPerFlow " << r
bPerFlow << " flows " << nflows); | 1484 NS_LOG_INFO (this << " RB Allocated " << rbAllocated << " rbPerFlow " << r
bPerFlow << " flows " << nflows); |
1486 while ((!allocated)&&((rbAllocated + rbPerFlow - m_cschedCellConfig.m_ulBa
ndwidth) < 1) && (rbPerFlow != 0)) | 1485 while ((!allocated)&&((rbAllocated + rbPerFlow - m_cschedCellConfig.m_ulBa
ndwidth) < 1) && (rbPerFlow != 0)) |
1487 { | 1486 { |
1488 // check availability | 1487 // check availability |
1489 bool free = true; | 1488 bool free = true; |
1490 for (uint16_t j = static_cast<uint16_t>(rbAllocated); j < static_cast<
uint16_t>(rbAllocated + rbPerFlow); j++) | 1489 for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++) |
1491 { | 1490 { |
1492 if (rbMap.at (j) == true) | 1491 if (rbMap.at (j) == true) |
1493 { | 1492 { |
1494 free = false; | 1493 free = false; |
1495 break; | 1494 break; |
1496 } | 1495 } |
1497 } | 1496 } |
1498 if (free) | 1497 if (free) |
1499 { | 1498 { |
1500 uldci.m_rbStart = static_cast<uint8_t>(rbAllocated); | 1499 uldci.m_rbStart = static_cast<uint8_t> (rbAllocated); |
1501 | 1500 |
1502 for (uint16_t j = static_cast<uint16_t>(rbAllocated); j < static_c
ast<uint16_t>(rbAllocated + rbPerFlow); j++) | 1501 for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++) |
1503 { | 1502 { |
1504 rbMap.at (j) = true; | 1503 rbMap.at (j) = true; |
1505 // store info on allocation for managing ul-cqi interpretation | 1504 // store info on allocation for managing ul-cqi interpretation |
1506 rbgAllocationMap.at (j) = (*it).first; | 1505 rbgAllocationMap.at (j) = (*it).first; |
1507 } | 1506 } |
1508 rbAllocated += rbPerFlow; | 1507 rbAllocated += rbPerFlow; |
1509 allocated = true; | 1508 allocated = true; |
1510 break; | 1509 break; |
1511 } | 1510 } |
1512 rbAllocated++; | 1511 rbAllocated++; |
1513 if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth) | 1512 if (rbAllocated + rbPerFlow - 1 > m_cschedCellConfig.m_ulBandwidth) |
1514 { | 1513 { |
1515 // limit to physical resources last resource assignment | 1514 // limit to physical resources last resource assignment |
1516 rbPerFlow = static_cast<uint16_t>(m_cschedCellConfig.m_ulBandwidth
- rbAllocated); | 1515 rbPerFlow = m_cschedCellConfig.m_ulBandwidth - rbAllocated; |
1517 // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes | 1516 // at least 3 rbg per flow to ensure TxOpportunity >= 7 bytes |
1518 if (rbPerFlow < 3) | 1517 if (rbPerFlow < 3) |
1519 { | 1518 { |
1520 // terminate allocation | 1519 // terminate allocation |
1521 rbPerFlow = 0; | 1520 rbPerFlow = 0; |
1522 } | 1521 } |
1523 } | 1522 } |
1524 } | 1523 } |
1525 if (!allocated) | 1524 if (!allocated) |
1526 { | 1525 { |
1527 // unable to allocate new resource: finish scheduling | 1526 // unable to allocate new resource: finish scheduling |
1528 m_nextRntiUl = (*it).first; | 1527 m_nextRntiUl = (*it).first; |
1529 if (ret.m_dciList.size () > 0) | 1528 if (ret.m_dciList.size () > 0) |
1530 { | 1529 { |
1531 m_schedSapUser->SchedUlConfigInd (ret); | 1530 m_schedSapUser->SchedUlConfigInd (ret); |
1532 } | 1531 } |
1533 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> >
(params.m_sfnSf, rbgAllocationMap)); | 1532 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> >
(params.m_sfnSf, rbgAllocationMap)); |
1534 return; | 1533 return; |
1535 } | 1534 } |
| 1535 |
| 1536 |
1536 | 1537 |
1537 std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find
((*it).first); | 1538 std::map <uint16_t, std::vector <double> >::iterator itCqi = m_ueCqi.find
((*it).first); |
1538 int cqi = 0; | 1539 int cqi = 0; |
1539 if (itCqi == m_ueCqi.end ()) | 1540 if (itCqi == m_ueCqi.end ()) |
1540 { | 1541 { |
1541 // no cqi info about this UE | 1542 // no cqi info about this UE |
1542 uldci.m_mcs = 0; // MCS 0 -> UL-AMC TBD | 1543 uldci.m_mcs = 0; // MCS 0 -> UL-AMC TBD |
1543 } | 1544 } |
1544 else | 1545 else |
1545 { | 1546 { |
(...skipping 30 matching lines...) Expand all Loading... |
1576 it = m_ceBsrRxed.begin (); | 1577 it = m_ceBsrRxed.begin (); |
1577 } | 1578 } |
1578 NS_LOG_DEBUG (this << " UE discared for CQI=0, RNTI " << uldci.m_r
nti); | 1579 NS_LOG_DEBUG (this << " UE discared for CQI=0, RNTI " << uldci.m_r
nti); |
1579 // remove UE from allocation map | 1580 // remove UE from allocation map |
1580 for (uint16_t i = uldci.m_rbStart; i < uldci.m_rbStart + uldci.m_r
bLen; i++) | 1581 for (uint16_t i = uldci.m_rbStart; i < uldci.m_rbStart + uldci.m_r
bLen; i++) |
1581 { | 1582 { |
1582 rbgAllocationMap.at (i) = 0; | 1583 rbgAllocationMap.at (i) = 0; |
1583 } | 1584 } |
1584 continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 3
6.213) | 1585 continue; // CQI == 0 means "out of range" (see table 7.2.3-1 of 3
6.213) |
1585 } | 1586 } |
1586 uldci.m_mcs = static_cast<uint8_t>(m_amc->GetMcsFromCqi (cqi)); | 1587 uldci.m_mcs = m_amc->GetMcsFromCqi (cqi); |
1587 } | 1588 } |
1588 | 1589 |
1589 uldci.m_tbSize = static_cast<uint16_t>(m_amc->GetTbSizeFromMcs (uldci.m_mc
s, rbPerFlow) / 8); | 1590 uldci.m_tbSize = (m_amc->GetUlTbSizeFromMcs (uldci.m_mcs, rbPerFlow) / 8); |
1590 UpdateUlRlcBufferInfo (uldci.m_rnti, uldci.m_tbSize); | 1591 UpdateUlRlcBufferInfo (uldci.m_rnti, uldci.m_tbSize); |
1591 uldci.m_ndi = 1; | 1592 uldci.m_ndi = 1; |
1592 uldci.m_cceIndex = 0; | 1593 uldci.m_cceIndex = 0; |
1593 uldci.m_aggrLevel = 1; | 1594 uldci.m_aggrLevel = 1; |
1594 uldci.m_ueTxAntennaSelection = 3; // antenna selection OFF | 1595 uldci.m_ueTxAntennaSelection = 3; // antenna selection OFF |
1595 uldci.m_hopping = false; | 1596 uldci.m_hopping = false; |
1596 uldci.m_n2Dmrs = 0; | 1597 uldci.m_n2Dmrs = 0; |
1597 uldci.m_tpc = 0; // no power control | 1598 uldci.m_tpc = 0; // no power control |
1598 uldci.m_cqiRequest = false; // only period CQI at this stage | 1599 uldci.m_cqiRequest = false; // only period CQI at this stage |
1599 uldci.m_ulIndex = 0; // TDD parameter | 1600 uldci.m_ulIndex = 0; // TDD parameter |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1671 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params
.m_sfnSf, rbgAllocationMap)); | 1672 m_allocationMaps.insert (std::pair <uint16_t, std::vector <uint16_t> > (params
.m_sfnSf, rbgAllocationMap)); |
1672 m_schedSapUser->SchedUlConfigInd (ret); | 1673 m_schedSapUser->SchedUlConfigInd (ret); |
1673 | 1674 |
1674 return; | 1675 return; |
1675 } | 1676 } |
1676 | 1677 |
// SCHED_UL_NOISE_INTERFERENCE_REQ primitive of the FF MAC Scheduler SAP.
// No-op in this scheduler: the call is traced and the report is otherwise
// ignored (params is only referenced to silence the unused-parameter warning).
void
FdBetFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params)
{
  NS_LOG_FUNCTION (this);
  NS_UNUSED (params); // intentionally unused — this scheduler does not consume UL noise/interference reports
  return;
}
1684 | 1685 |
// SCHED_UL_SR_INFO_REQ primitive of the FF MAC Scheduler SAP ("SR" presumably
// scheduling-request indications — confirm against FfMacSchedSapProvider docs).
// No-op in this scheduler: the call is traced and the info is otherwise ignored.
void
FdBetFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params)
{
  NS_LOG_FUNCTION (this);
  NS_UNUSED (params); // intentionally unused — UL scheduling here is driven by BSR state, not this primitive
  return;
}
1692 | 1693 |
1693 void | 1694 void |
1694 FdBetFfMacScheduler::DoSchedUlMacCtrlInfoReq (const struct FfMacSchedSapProvider
::SchedUlMacCtrlInfoReqParameters& params) | 1695 FdBetFfMacScheduler::DoSchedUlMacCtrlInfoReq (const struct FfMacSchedSapProvider
::SchedUlMacCtrlInfoReqParameters& params) |
1695 { | 1696 { |
1696 NS_LOG_FUNCTION (this); | 1697 NS_LOG_FUNCTION (this); |
1697 | 1698 |
1698 std::map <uint16_t,uint32_t>::iterator it; | 1699 std::map <uint16_t,uint32_t>::iterator it; |
1699 | 1700 |
(...skipping 347 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2047 { | 2048 { |
2048 NS_LOG_FUNCTION (this << " RNTI " << rnti << " txMode " << (uint16_t)txMode); | 2049 NS_LOG_FUNCTION (this << " RNTI " << rnti << " txMode " << (uint16_t)txMode); |
2049 FfMacCschedSapUser::CschedUeConfigUpdateIndParameters params; | 2050 FfMacCschedSapUser::CschedUeConfigUpdateIndParameters params; |
2050 params.m_rnti = rnti; | 2051 params.m_rnti = rnti; |
2051 params.m_transmissionMode = txMode; | 2052 params.m_transmissionMode = txMode; |
2052 m_cschedSapUser->CschedUeConfigUpdateInd (params); | 2053 m_cschedSapUser->CschedUeConfigUpdateInd (params); |
2053 } | 2054 } |
2054 | 2055 |
2055 | 2056 |
2056 } | 2057 } |
LEFT | RIGHT |