Rietveld Code Review Tool

Unified Diff: src/lte/model/pss-ff-mac-scheduler.cc

Issue 338840043: Eliminate Visual Studio compiler warnings (Closed)
Patch Set: Patch updates for x64 build Created 6 years, 2 months ago
Index: src/lte/model/pss-ff-mac-scheduler.cc
===================================================================
--- a/src/lte/model/pss-ff-mac-scheduler.cc
+++ b/src/lte/model/pss-ff-mac-scheduler.cc
@@ -179,7 +179,7 @@
std::map <uint16_t,uint8_t>::iterator it = m_uesTxMode.find (params.m_rnti);
if (it == m_uesTxMode.end ())
{
- m_uesTxMode.insert (std::pair <uint16_t, double> (params.m_rnti, params.m_transmissionMode));
+ m_uesTxMode.insert (std::pair <uint16_t, uint8_t> (params.m_rnti, params.m_transmissionMode));
// generate HARQ buffers
m_dlHarqCurrentProcessId.insert (std::pair <uint16_t,uint8_t > (params.m_rnti, 0));
DlHarqProcessesStatus_t dlHarqPrcStatus;
@@ -223,8 +223,8 @@
if (it == m_flowStatsDl.end ())
{
- double tbrDlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateDl / 8; // byte/s
- double tbrUlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateUl / 8; // byte/s
+ double tbrDlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateDl / 8.0; // byte/s
+ double tbrUlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateUl / 8.0; // byte/s
pssFlowPerf_t flowStatsDl;
flowStatsDl.flowStart = Simulator::Now ();
@@ -246,8 +246,8 @@
else
{
// update GBR from UeManager::SetupDataRadioBearer ()
- double tbrDlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateDl / 8; // byte/s
- double tbrUlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateUl / 8; // byte/s
+ double tbrDlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateDl / 8.0; // byte/s
+ double tbrUlInBytes = params.m_logicalChannelConfigList.at (i).m_eRabGuaranteedBitrateUl / 8.0; // byte/s
m_flowStatsDl[(*it).first].targetThroughput = tbrDlInBytes;
m_flowStatsUl[(*it).first].targetThroughput = tbrUlInBytes;
}
@@ -351,6 +351,7 @@
{
NS_LOG_FUNCTION (this);
NS_FATAL_ERROR ("method not implemented");
+ NS_UNUSED(params);
return;
}
@@ -359,6 +360,7 @@
{
NS_LOG_FUNCTION (this);
NS_FATAL_ERROR ("method not implemented");
+ NS_UNUSED(params);
return;
}
@@ -540,8 +542,7 @@
FfMacSchedSapUser::SchedDlConfigIndParameters ret;
// update UL HARQ proc id
- std::map <uint16_t, uint8_t>::iterator itProcId;
- for (itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
+ for (std::map <uint16_t, uint8_t>::iterator itProcId = m_ulHarqCurrentProcessId.begin (); itProcId != m_ulHarqCurrentProcessId.end (); itProcId++)
{
(*itProcId).second = ((*itProcId).second + 1) % HARQ_PROC_NUM;
}
@@ -553,9 +554,9 @@
ulRbMap = m_ffrSapProvider->GetAvailableUlRbg ();
uint8_t maxContinuousUlBandwidth = 0;
uint8_t tmpMinBandwidth = 0;
- uint16_t ffrRbStartOffset = 0;
- uint16_t tmpFfrRbStartOffset = 0;
- uint16_t index = 0;
+ uint8_t ffrRbStartOffset = 0;
+ uint8_t tmpFfrRbStartOffset = 0;
+ uint8_t index = 0;
for (std::vector<bool>::iterator it = ulRbMap.begin (); it != ulRbMap.end (); it++)
{
@@ -587,7 +588,7 @@
}
m_rachAllocationMap.resize (m_cschedCellConfig.m_ulBandwidth, 0);
- uint16_t rbStart = 0;
+ uint8_t rbStart = 0;
rbStart = ffrRbStartOffset;
std::vector <struct RachListElement_s>::iterator itRach;
for (itRach = m_rachList.begin (); itRach != m_rachList.end (); itRach++)
@@ -600,7 +601,7 @@
// UL-RACH Allocation
newRar.m_grant.m_rnti = newRar.m_rnti;
newRar.m_grant.m_mcs = m_ulGrantMcs;
- uint16_t rbLen = 1;
+ uint8_t rbLen = 1;
uint16_t tbSizeBits = 0;
// find lowest TB size that fits UL grant estimated size
while ((tbSizeBits < (*itRach).m_estimatedSize) && (rbStart + rbLen < (ffrRbStartOffset + maxContinuousUlBandwidth)))
@@ -702,7 +703,7 @@
// RNTI already allocated for retx
continue;
}
- uint8_t nLayers = m_dlInfoListBuffered.at (i).m_harqStatus.size ();
+ uint8_t nLayers = static_cast<uint8_t>(m_dlInfoListBuffered.at (i).m_harqStatus.size ());
std::vector <bool> retx;
NS_LOG_INFO (this << " Processing DLHARQ feedback");
if (nLayers == 1)
@@ -799,8 +800,8 @@
{
// find RBGs for sending HARQ retx
uint8_t j = 0;
- uint8_t rbgId = (dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum;
- uint8_t startRbg = dciRbg.at (dciRbg.size () - 1);
+ uint8_t rbgId = static_cast<uint8_t>((dciRbg.at (dciRbg.size () - 1) + 1) % rbgNum);
+ uint8_t startRbg = static_cast<uint8_t>(dciRbg.at (dciRbg.size () - 1));
std::vector <bool> rbgMapCopy = rbgMap;
while ((j < dciRbg.size ())&&(startRbg != rbgId))
{
@@ -946,12 +947,11 @@
}
- std::map <uint16_t, pssFlowPerf_t>::iterator it;
std::map <uint16_t, pssFlowPerf_t> tdUeSet; // the result of TD scheduler
// schedulability check
std::map <uint16_t, pssFlowPerf_t> ueSet;
- for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it++)
{
if( LcActivePerFlow ((*it).first) > 0 )
{
@@ -965,7 +965,7 @@
// Time Domain scheduler
std::vector <std::pair<double, uint16_t> > ueSet1;
std::vector <std::pair<double,uint16_t> > ueSet2;
- for (it = ueSet.begin (); it != ueSet.end (); it++)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = ueSet.begin (); it != ueSet.end (); it++)
{
std::set <uint16_t>::iterator itRnti = rntiAllocated.find ((*it).first);
if ((itRnti != rntiAllocated.end ())||(!HarqProcessAvailability ((*it).first)))
@@ -1046,8 +1046,7 @@
double achievableRate = 0.0;
for (uint8_t k = 0; k < nLayer; k++)
{
- uint8_t mcs = 0;
- mcs = m_amc->GetMcsFromCqi (wbCqi);
+ uint8_t mcs = m_amc->GetMcsFromCqi (wbCqi);
achievableRate += ((m_amc->GetDlTbSizeFromMcs (mcs, rbgSize) / 8) / 0.001); // = TB size / TTI
}
@@ -1077,7 +1076,7 @@
else
nMux = (int)((ueSet1.size() + ueSet2.size()) / 2) ; // TD scheduler only transfers half selected UE per RTT to TD scheduler
}
- for (it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it--)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = m_flowStatsDl.begin (); it != m_flowStatsDl.end (); it--)
{
std::vector <std::pair<double, uint16_t> >::iterator itSet;
for (itSet = ueSet1.begin (); itSet != ueSet1.end () && nMux != 0; itSet++)
@@ -1109,7 +1108,7 @@
{
// FD scheduler: Carrier over Interference to Average (CoItA)
std::map < uint16_t, uint8_t > sbCqiSum;
- for (it = tdUeSet.begin (); it != tdUeSet.end (); it++)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = tdUeSet.begin (); it != tdUeSet.end (); it++)
{
uint8_t sum = 0;
for (int i = 0; i < rbgNum; i++)
@@ -1165,14 +1164,14 @@
sbCqiSum.insert (std::pair<uint16_t, uint8_t> ((*it).first, sum));
}// end tdUeSet
- for (int i = 0; i < rbgNum; i++)
+ for (uint16_t i = 0; i < rbgNum; i++)
{
if (rbgMap.at (i) == true)
continue;
std::map <uint16_t, pssFlowPerf_t>::iterator itMax = tdUeSet.end ();
double metricMax = 0.0;
- for (it = tdUeSet.begin (); it != tdUeSet.end (); it++)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = tdUeSet.begin (); it != tdUeSet.end (); it++)
{
if ((m_ffrSapProvider->IsDlRbgAvailableForUe (i, (*it).first)) == false)
continue;
@@ -1263,14 +1262,14 @@
if ( m_fdSchedulerType.compare("PFsch") == 0)
{
// FD scheduler: Proportional Fair scheduled (PFsch)
- for (int i = 0; i < rbgNum; i++)
+ for (uint8_t i = 0; i < rbgNum; i++)
{
if (rbgMap.at (i) == true)
continue;
std::map <uint16_t, pssFlowPerf_t>::iterator itMax = tdUeSet.end ();
double metricMax = 0.0;
- for (it = tdUeSet.begin (); it != tdUeSet.end (); it++)
+ for (std::map <uint16_t, pssFlowPerf_t>::iterator it = tdUeSet.begin (); it != tdUeSet.end (); it++)
{
if ((m_ffrSapProvider->IsDlRbgAvailableForUe (i, (*it).first)) == false)
continue;
@@ -1379,14 +1378,14 @@
newDci.m_rnti = (*itMap).first;
newDci.m_harqProcess = UpdateHarqProcessId ((*itMap).first);
- uint16_t lcActives = LcActivePerFlow ((*itMap).first);
+ uint16_t lcActives = static_cast<uint16_t>(LcActivePerFlow ((*itMap).first));
NS_LOG_INFO (this << "Allocate user " << newEl.m_rnti << " rbg " << lcActives);
if (lcActives == 0)
{
// Set to max value, to avoid divide by 0 below
lcActives = (uint16_t)65535; // UINT16_MAX;
}
- uint16_t RgbPerRnti = (*itMap).second.size ();
+ uint16_t RgbPerRnti = static_cast<uint16_t>((*itMap).second.size ());
std::map <uint16_t,SbMeasResult_s>::iterator itCqi;
itCqi = m_a30CqiRxed.find ((*itMap).first);
std::map <uint16_t,uint8_t>::iterator itTxMode;
@@ -1445,7 +1444,7 @@
{
newDci.m_mcs.push_back (m_amc->GetMcsFromCqi (worstCqi.at (j)));
int tbSize = (m_amc->GetDlTbSizeFromMcs (newDci.m_mcs.at (j), RgbPerRnti * rbgSize) / 8); // (size of TB in bytes according to table 7.1.7.2.1-1 of 36.213)
- newDci.m_tbsSize.push_back (tbSize);
+ newDci.m_tbsSize.push_back (static_cast<uint16_t>(tbSize));
NS_LOG_INFO (this << " Layer " << (uint16_t)j << " MCS selected" << m_amc->GetMcsFromCqi (worstCqi.at (j)));
bytesTxed += tbSize;
}
@@ -1534,8 +1533,6 @@
{
(*it).second.lastTtiBytesTransmitted = bytesTxed;
NS_LOG_INFO (this << " UE total bytes txed " << (*it).second.lastTtiBytesTransmitted);
-
-
}
else
{
@@ -1707,7 +1704,7 @@
}
uint8_t minContinuousUlBandwidth = m_ffrSapProvider->GetMinContinuousUlBandwidth ();
- uint8_t ffrUlBandwidth = m_cschedCellConfig.m_ulBandwidth - rbAllocatedNum;
+ uint8_t ffrUlBandwidth = static_cast<uint8_t>(m_cschedCellConfig.m_ulBandwidth - rbAllocatedNum);
// remove RACH allocation
@@ -1821,14 +1818,14 @@
// Divide the remaining resources equally among the active users starting from the subsequent one served last scheduling trigger
- uint16_t tempRbPerFlow = (ffrUlBandwidth) / (nflows + rntiAllocated.size ());
+ uint16_t tempRbPerFlow = static_cast<uint16_t>((ffrUlBandwidth) / (nflows + rntiAllocated.size ()));
uint16_t rbPerFlow = (minContinuousUlBandwidth < tempRbPerFlow) ? minContinuousUlBandwidth : tempRbPerFlow;
if (rbPerFlow < 3)
{
rbPerFlow = 3; // at least 3 rbg per flow (till available resource) to ensure TxOpportunity >= 7 bytes
}
- int rbAllocated = 0;
+ uint16_t rbAllocated = 0;
std::map <uint16_t, pssFlowPerf_t>::iterator itStats;
if (m_nextRntiUl != 0)
@@ -1880,7 +1877,8 @@
rbAllocated = 0;
UlDciListElement_s uldci;
uldci.m_rnti = (*it).first;
- uldci.m_rbLen = rbPerFlow;
+ uldci.m_rbLen = static_cast<uint8_t>(rbPerFlow);
+ uldci.m_rbStart = 0;
bool allocated = false;
NS_LOG_INFO (this << " RB Allocated " << rbAllocated << " rbPerFlow " << rbPerFlow << " flows " << nflows);
while ((!allocated)&&((rbAllocated + rbPerFlow - m_cschedCellConfig.m_ulBandwidth) < 1) && (rbPerFlow != 0))
@@ -1903,7 +1901,7 @@
if (free)
{
NS_LOG_INFO (this << "RNTI: "<< (*it).first<< " RB Allocated " << rbAllocated << " rbPerFlow " << rbPerFlow << " flows " << nflows);
- uldci.m_rbStart = rbAllocated;
+ uldci.m_rbStart = static_cast<uint8_t>(rbAllocated);
for (uint16_t j = rbAllocated; j < rbAllocated + rbPerFlow; j++)
{
@@ -2063,6 +2061,7 @@
PssFfMacScheduler::DoSchedUlNoiseInterferenceReq (const struct FfMacSchedSapProvider::SchedUlNoiseInterferenceReqParameters& params)
{
NS_LOG_FUNCTION (this);
+ NS_UNUSED(params);
return;
}
@@ -2070,6 +2069,7 @@
PssFfMacScheduler::DoSchedUlSrInfoReq (const struct FfMacSchedSapProvider::SchedUlSrInfoReqParameters& params)
{
NS_LOG_FUNCTION (this);
+ NS_UNUSED(params);
return;
}
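
Note on the idioms used throughout this patch (an illustrative sketch, not part of the diff): the "method not implemented" SAP callbacks silence unused-parameter warnings with ns-3's NS_UNUSED macro, which typically expands to a cast to void, and the x64 build warnings about narrowing (MSVC diagnostics such as "conversion ... possible loss of data") are silenced with explicit static_casts where the value range is known to fit. A minimal, self-contained C++ sketch of the same patterns follows; the names UNUSED_PARAM, NotImplementedMethod, CountLayers, GbrToBytesPerSecond and BumpHarqProcessIds are hypothetical stand-ins for illustration, not ns-3 code.

#include <cstdint>
#include <map>
#include <vector>

// Stand-in for ns-3's NS_UNUSED macro: casting to void marks the
// parameter as deliberately unused, so MSVC's "unreferenced formal
// parameter" warning is not emitted.
#define UNUSED_PARAM(x) ((void)(x))

void NotImplementedMethod (int params)
{
  UNUSED_PARAM (params); // parameter intentionally unused
}

uint8_t CountLayers (const std::vector<bool> &harqStatus)
{
  // size() returns std::size_t (64-bit on x64); the explicit cast documents
  // the intentional narrowing and silences the conversion warning.
  return static_cast<uint8_t> (harqStatus.size ());
}

double GbrToBytesPerSecond (uint64_t gbrBitsPerSecond)
{
  // Dividing by 8.0 performs floating-point division directly, instead of
  // integer division followed by an implicit conversion to double.
  return gbrBitsPerSecond / 8.0;
}

void BumpHarqProcessIds (std::map<uint16_t, uint8_t> &ulHarqCurrentProcessId,
                         uint8_t harqProcNum)
{
  // Declaring the iterator inside the for statement keeps its scope local,
  // mirroring the loop-local iterators this patch introduces.
  for (std::map<uint16_t, uint8_t>::iterator it = ulHarqCurrentProcessId.begin ();
       it != ulHarqCurrentProcessId.end (); ++it)
    {
      // Arithmetic promotes to int; the cast narrows back to the map's value type.
      it->second = static_cast<uint8_t> ((it->second + 1) % harqProcNum);
    }
}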