0

私はc ++を実行していますが、コードでこれらのエラーが発生しています:

   new size of the done 64
   getting next item to do (0x90b9c0 0x90bab0 0x90be40 0)
   vector wait size = 0
   running size = 1
   done size = 64
   done this align #27# #26# Gorilla_gorilla_2
   setting the done tuple to 1 to get them next
   new size of the done 65
   getting next item to do (0xbff3897200000000 0xbfda1b75f84c0030 0x30 17)
   vector wait size = 18446744073709551615
   running size = 1
   done size = 65
   getting next item to do (0xbfdcc288867fc50b 0x31 0x1 1)
   /usr/local/netbeans-7.2rc1/ide/bin/nativeexecution/dorun.sh: line 33:  8234   
   Segmentation     
   fault      (core dumped) sh "${SHFILE}"

そして、原因となった行など、セグメント障害の詳細をどのように取得して修正できるのでしょうか。ありがとうございます。

編集:

  bool tuple_compare(boost::tuple< ppa::Node*, ppa::Node*, ppa::Node*, bool> tuple)
{



  if( boost::get< 3 >( tuple) == true)
 {
          return true;
 }
  else
  {
      return false;
  }
 }


void threaded_function(Model_factory &mf, ppa::Node *root)
 {
  try {


  while(true)
  {
      boost::mutex::scoped_lock lock(result_mutex);

      if(wait.empty())
      {

          lock.unlock();
          break;
      }
      else {
          lock.unlock();
          lock.lock();
          if(!running_jobs.empty())
          {
              cout << "vector wait size = " << wait.size() << endl;
              cout << "running size = " << running_jobs.size() << endl;
              cout << "done size = " << done.size() << endl;
              boost::tuple<ppa::Node*, ppa::Node*, ppa::Node*, bool> tuple = running_jobs.back();
              running_jobs.pop_back();
              lock.unlock();
              ppa::Node *father = boost::get<0>(tuple);
              ppa::Node *first_son = boost::get<1>(tuple);
              ppa::Node *second_son = boost::get<2>(tuple);
              bool flag = boost::get<3>(tuple);
              father->start_alignment_new(&mf);
              lock.lock();
              cout << "done this align " << father->get_name() << " " << first_son->get_name() << " " << second_son->get_name()
                      << endl;
              cout << "setting the done tuple to 1 to get them next" << endl;
              done.push_back(tuple);
              cout << "new size of the done " << done.size() << endl;
              lock.unlock();



          }
          else {
              lock.unlock();
              lock.lock();
              cout << "getting next item to do " << wait.back() << endl;
              boost::tuple<ppa::Node*, ppa::Node*, ppa::Node*, bool> run = wait.back();
              running_jobs.push_back(run);
              wait.pop_back();
              lock.unlock();
          }
      }
    }
  }
  catch (boost::lock_error& le)
  {
      cout << "error this " << le.what() << endl;
  }
  }

主要:

 int main(int argc, char *argv[])
 {
clock_t t_start=clock();
ppa::Node* parent;
ppa::Node* left;
ppa::Node* rigth;
ppa::Node* check_root;
/*

...

boost::tuple<ppa::Node*, ppa::Node*, ppa::Node*, bool>  tuple = dups.back();
    ppa::Node *n = boost::get<0>(tuple);
     //cout << "creating waiting thread" << endl;
   // g.create_thread( boost::bind(wait_function));
    cout << "creating other threads" << endl;
 for ( int i = 0; i < 5; ++i )
 {
   cout << "making  thread " << i << endl;
    g.create_thread( boost::bind( threaded_function, boost::ref(mf), boost::ref(n) ));
}



 cout << g.size() << endl ;
// wait for them
// sleep(15);
 g.join_all();     

私が追加した新しい部分であるこの部分から来ていると確信しています。

4

1 に答える 1

0

不適切なコード、ロックとロック解除が多すぎる、mutex には共有 lock.lock() を使用する必要のないロック メソッドがあります

于 2012-07-25T07:06:30.090 に答える