mirror of
https://gitlab.com/kicad/code/kicad.git
synced 2025-04-21 21:43:43 +00:00
ADDED: Threadpool
Thread pools are long-lived executors with close to zero overhead when launching new jobs. This is an advantage over creating new threads, as it lets us thread smaller jobs and smaller quanta of work. It also avoids the heuristics otherwise needed to determine the optimal number of threads to spawn.
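As an illustration only (not part of the commit), here is a minimal sketch of the pattern this change enables, following the submit()/future idiom used throughout the diff. The function and variable names (doubleAll, aItems) are hypothetical.

```cpp
#include <thread_pool.h>    // GetKiCadThreadPool() and the thread_pool (BS::thread_pool) alias
#include <future>
#include <vector>

void doubleAll( std::vector<int>& aItems )
{
    // Always take a reference: the pool is not copyable.
    thread_pool& tp = GetKiCadThreadPool();

    std::vector<std::future<size_t>> returns;
    returns.reserve( aItems.size() );

    // One small job per item; the long-lived workers keep the per-job launch cost
    // near zero, unlike spawning a std::thread or std::async task for each job.
    for( int& item : aItems )
    {
        returns.emplace_back( tp.submit(
                [&item]() -> size_t
                {
                    item *= 2;
                    return 1;
                } ) );
    }

    for( auto& retval : returns )
        retval.wait();
}
```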
This commit is contained in:
parent
8418fe12d8
commit
03c279ffd4
common
cvpcb
eeschema
gerbview
include
kicad
pagelayout_editor
pcbnew
CMakeLists.txt
board.cpp
connectivity
drc
drc_cache_generator.cpp
drc_engine.cpp
drc_engine.h
drc_test_provider_disallow.cpp
drc_test_provider_sliver_checker.cpp
drc_test_provider_zone_connections.cpp
footprint_info_impl.cpp
footprint_info_impl.h
zone_filler.cpp
thirdparty
@ -65,6 +65,16 @@ install( TARGETS lib_kicad
    )
endif()

# Build a single library for the thread pool that we can link around
add_library( threadpool STATIC
    thread_pool.cpp
    )

target_include_directories( threadpool PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

# The build version string defaults to the value in the KiCadVersion.cmake file.
# If being built inside a git repository, the git tag and commit hash are used to create
@ -433,7 +443,7 @@ set( COMMON_SRCS
add_library( common STATIC
    ${COMMON_SRCS}
    )

add_dependencies( common version_header )
add_dependencies( common compoundfilereader ) # used by altium_parser.cpp

@ -443,6 +453,7 @@ target_link_libraries( common
    kiplatform
    gal
    scripting
    threadpool
    pybind11::embed
    compoundfilereader
    ${Boost_LIBRARIES}
@ -464,6 +475,7 @@ target_include_directories( common
    PUBLIC
        .
        ${CMAKE_BINARY_DIR}
        $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

# text markup support
@ -559,11 +571,16 @@ set_source_files_properties( ${PCB_COMMON_SRCS} PROPERTIES

add_library( pcbcommon STATIC ${PCB_COMMON_SRCS} )

target_include_directories( pcbcommon PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

target_link_libraries( pcbcommon PUBLIC
    common
    delaunator
    kimath
    kiplatform
    threadpool
    )

if( KICAD_USE_3DCONNEXION )
@ -152,63 +152,3 @@ FOOTPRINT_LIST* FOOTPRINT_LIST::GetInstance( KIWAY& aKiway )
|
||||
|
||||
return footprintInfo;
|
||||
}
|
||||
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER::FOOTPRINT_ASYNC_LOADER() : m_list( nullptr )
|
||||
{
|
||||
m_total_libs = 0;
|
||||
}
|
||||
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER::~FOOTPRINT_ASYNC_LOADER()
|
||||
{
|
||||
// This is NOP if the load has finished
|
||||
Abort();
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::SetList( FOOTPRINT_LIST* aList )
|
||||
{
|
||||
m_list = aList;
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::Start( FP_LIB_TABLE* aTable, wxString const* aNickname,
|
||||
unsigned aNThreads )
|
||||
{
|
||||
// Disable KIID generation: not needed for library parts; sometimes very slow
|
||||
KIID::CreateNilUuids( true );
|
||||
|
||||
// Capture the FP_LIB_TABLE into m_last_table. Formatting it as a string instead of storing the
|
||||
// raw data avoids having to pull in the FP-specific parts.
|
||||
STRING_FORMATTER sof;
|
||||
aTable->Format( &sof, 0 );
|
||||
m_last_table = sof.GetString();
|
||||
|
||||
m_list->startWorkers( aTable, aNickname, this, aNThreads );
|
||||
}
|
||||
|
||||
|
||||
bool FOOTPRINT_ASYNC_LOADER::Join()
|
||||
{
|
||||
if( m_list )
|
||||
{
|
||||
bool rv = m_list->joinWorkers();
|
||||
m_list = nullptr;
|
||||
return rv;
|
||||
}
|
||||
else
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::Abort()
|
||||
{
|
||||
if( m_list )
|
||||
{
|
||||
m_list->stopWorkers();
|
||||
m_list = nullptr;
|
||||
}
|
||||
|
||||
KIID::CreateNilUuids( false );
|
||||
}
|
||||
|
@ -53,14 +53,15 @@
#include <kiplatform/policy.h>
#include <lockfile.h>
#include <menus_helpers.h>
#include <paths.h>
#include <pgm_base.h>
#include <policy_keys.h>
#include <python_scripting.h>
#include <settings/common_settings.h>
#include <settings/settings_manager.h>
#include <systemdirsappend.h>
#include <thread_pool.h>
#include <trace_helpers.h>
#include <paths.h>
#include <policy_keys.h>

#ifdef KICAD_USE_SENTRY
#include <boost/uuid/uuid_io.hpp>
@ -392,6 +393,8 @@ bool PGM_BASE::InitPgm( bool aHeadless, bool aSkipPyInit )
    }
#endif

    GetKiCadThreadPool();

    // Init KiCad environment
    // the environment variable KICAD (if exists) gives the kicad path:
    // something like set KICAD=d:\kicad
33 common/thread_pool.cpp Normal file
@ -0,0 +1,33 @@
/*
 * This program source code file is part of KiCad, a free EDA CAD application.
 *
 * Copyright (C) 2022 KiCad Developers, see CHANGELOG.TXT for contributors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 3
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you may find one here:
 * http://www.gnu.org/licenses/gpl-3.0.html
 * or you may search the http://www.gnu.org website for the version 3 license,
 * or you may write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */


#include <thread_pool.h>


thread_pool& GetKiCadThreadPool()
{
    static thread_pool tp;
    return tp;
}
@ -83,6 +83,10 @@ target_link_libraries( cvpcb_kiface
# Must follow github_plugin
target_link_libraries( cvpcb_kiface ${Boost_LIBRARIES} )

target_include_directories( cvpcb_kiface PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

if( UNIX AND NOT APPLE )
    # -lrt must follow Boost
    target_link_libraries( cvpcb_kiface rt )
@ -396,7 +396,7 @@ target_link_libraries( eeschema
    common
    ${wxWidgets_LIBRARIES}
    )


# the main Eeschema program, in DSO form.
add_library( eeschema_kiface_objects OBJECT
    ${EESCHEMA_SRCS}
@ -406,12 +406,14 @@ add_library( eeschema_kiface_objects OBJECT
target_include_directories( eeschema_kiface_objects
    PUBLIC
        .
        netlist_exporters )
        netlist_exporters
        $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

target_link_libraries( eeschema_kiface_objects
    PUBLIC
        common )

# Since we're not using target_link_libraries, we need to explicitly
# declare the dependency
add_dependencies( eeschema_kiface_objects common )
@ -21,7 +21,6 @@
|
||||
*/
|
||||
|
||||
#include <list>
|
||||
#include <thread>
|
||||
#include <future>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
@ -44,6 +43,7 @@
|
||||
#include <connection_graph.h>
|
||||
#include <widgets/ui_common.h>
|
||||
#include <string_utils.h>
|
||||
#include <thread_pool.h>
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <advanced_config.h> // for realtime connectivity switch in release builds
|
||||
@ -600,141 +600,125 @@ void CONNECTION_GRAPH::updateItemConnectivity( const SCH_SHEET_PATH& aSheet,
|
||||
// Pre-scan to see if we have a bus at this location
|
||||
SCH_LINE* busLine = aSheet.LastScreen()->GetBus( it.first );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 4 items (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( connection_vec.size() + 3 ) / 4 );
|
||||
|
||||
std::atomic<size_t> nextItem( 0 );
|
||||
std::mutex update_mutex;
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto update_lambda = [&]() -> size_t
|
||||
auto update_lambda = [&]( SCH_ITEM* connected_item ) -> size_t
|
||||
{
|
||||
for( size_t ii = nextItem++; ii < connection_vec.size(); ii = nextItem++ )
|
||||
// Bus entries are special: they can have connection points in the
|
||||
// middle of a wire segment, because the junction algo doesn't split
|
||||
// the segment in two where you place a bus entry. This means that
|
||||
// bus entries that don't land on the end of a line segment need to
|
||||
// have "virtual" connection points to the segments they graphically
|
||||
// touch.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
SCH_ITEM* connected_item = connection_vec[ii];
|
||||
// Bus entries are special: they can have connection points in the
|
||||
// middle of a wire segment, because the junction algo doesn't split
|
||||
// the segment in two where you place a bus entry. This means that
|
||||
// bus entries that don't land on the end of a line segment need to
|
||||
// have "virtual" connection points to the segments they graphically
|
||||
// touch.
|
||||
// If this location only has the connection point of the bus
|
||||
// entry itself, this means that either the bus entry is not
|
||||
// connected to anything graphically, or that it is connected to
|
||||
// a segment at some point other than at one of the endpoints.
|
||||
if( connection_vec.size() == 1 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = busLine;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bus-to-bus entries are treated just like bus wires
|
||||
else if( connected_item->Type() == SCH_BUS_BUS_ENTRY_T )
|
||||
{
|
||||
if( connection_vec.size() < 2 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_BUS_ENTRY*>( connected_item );
|
||||
|
||||
if( it.first == bus_entry->GetPosition() )
|
||||
bus_entry->m_connected_bus_items[0] = busLine;
|
||||
else
|
||||
bus_entry->m_connected_bus_items[1] = busLine;
|
||||
|
||||
std::lock_guard<std::mutex> lock( update_mutex );
|
||||
bus_entry->AddConnectionTo( aSheet, busLine );
|
||||
busLine->AddConnectionTo( aSheet, bus_entry );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Change junctions to be on bus junction layer if they are touching a bus
|
||||
else if( connected_item->Type() == SCH_JUNCTION_T )
|
||||
{
|
||||
connected_item->SetLayer( busLine ? LAYER_BUS_JUNCTION : LAYER_JUNCTION );
|
||||
}
|
||||
|
||||
SCH_ITEM_SET& connected_set = connected_item->ConnectedItems( aSheet );
|
||||
connected_set.reserve( connection_vec.size() );
|
||||
|
||||
for( SCH_ITEM* test_item : connection_vec )
|
||||
{
|
||||
bool bus_connection_ok = true;
|
||||
|
||||
if( test_item == connected_item )
|
||||
continue;
|
||||
|
||||
// Set up the link between the bus entry net and the bus
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
// If this location only has the connection point of the bus
|
||||
// entry itself, this means that either the bus entry is not
|
||||
// connected to anything graphically, or that it is connected to
|
||||
// a segment at some point other than at one of the endpoints.
|
||||
if( connection_vec.size() == 1 )
|
||||
if( test_item->GetLayer() == LAYER_BUS )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = busLine;
|
||||
}
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = test_item;
|
||||
}
|
||||
}
|
||||
|
||||
// Bus-to-bus entries are treated just like bus wires
|
||||
else if( connected_item->Type() == SCH_BUS_BUS_ENTRY_T )
|
||||
{
|
||||
if( connection_vec.size() < 2 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_BUS_ENTRY*>( connected_item );
|
||||
|
||||
if( it.first == bus_entry->GetPosition() )
|
||||
bus_entry->m_connected_bus_items[0] = busLine;
|
||||
else
|
||||
bus_entry->m_connected_bus_items[1] = busLine;
|
||||
|
||||
std::lock_guard<std::mutex> lock( update_mutex );
|
||||
bus_entry->AddConnectionTo( aSheet, busLine );
|
||||
busLine->AddConnectionTo( aSheet, bus_entry );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Change junctions to be on bus junction layer if they are touching a bus
|
||||
else if( connected_item->Type() == SCH_JUNCTION_T )
|
||||
{
|
||||
connected_item->SetLayer( busLine ? LAYER_BUS_JUNCTION : LAYER_JUNCTION );
|
||||
}
|
||||
|
||||
SCH_ITEM_SET& connected_set = connected_item->ConnectedItems( aSheet );
|
||||
connected_set.reserve( connection_vec.size() );
|
||||
|
||||
for( SCH_ITEM* test_item : connection_vec )
|
||||
{
|
||||
bool bus_connection_ok = true;
|
||||
|
||||
if( test_item == connected_item )
|
||||
continue;
|
||||
|
||||
// Set up the link between the bus entry net and the bus
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
if( test_item->GetLayer() == LAYER_BUS )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = test_item;
|
||||
}
|
||||
}
|
||||
|
||||
// Bus entries only connect to bus lines on the end that is touching a bus line.
|
||||
// If the user has overlapped another net line with the endpoint of the bus entry
|
||||
// where the entry connects to a bus, we don't want to short-circuit it.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || test_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
else if( test_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || connected_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
|
||||
if( connected_item->ConnectionPropagatesTo( test_item ) &&
|
||||
test_item->ConnectionPropagatesTo( connected_item ) &&
|
||||
bus_connection_ok )
|
||||
{
|
||||
connected_set.push_back( test_item );
|
||||
}
|
||||
}
|
||||
|
||||
// If we got this far and did not find a connected bus item for a bus entry,
|
||||
// we should do a manual scan in case there is a bus item on this connection
|
||||
// point but we didn't pick it up earlier because there is *also* a net item here.
|
||||
// Bus entries only connect to bus lines on the end that is touching a bus line.
|
||||
// If the user has overlapped another net line with the endpoint of the bus entry
|
||||
// where the entry connects to a bus, we don't want to short-circuit it.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_connection_ok = !busLine || test_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
else if( test_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || connected_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
|
||||
if( !bus_entry->m_connected_bus_item )
|
||||
{
|
||||
SCH_SCREEN* screen = aSheet.LastScreen();
|
||||
SCH_LINE* bus = screen->GetBus( it.first );
|
||||
if( connected_item->ConnectionPropagatesTo( test_item ) &&
|
||||
test_item->ConnectionPropagatesTo( connected_item ) &&
|
||||
bus_connection_ok )
|
||||
{
|
||||
connected_set.push_back( test_item );
|
||||
}
|
||||
}
|
||||
|
||||
if( bus )
|
||||
bus_entry->m_connected_bus_item = bus;
|
||||
}
|
||||
// If we got this far and did not find a connected bus item for a bus entry,
|
||||
// we should do a manual scan in case there is a bus item on this connection
|
||||
// point but we didn't pick it up earlier because there is *also* a net item here.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
|
||||
if( !bus_entry->m_connected_bus_item )
|
||||
{
|
||||
SCH_SCREEN* screen = aSheet.LastScreen();
|
||||
SCH_LINE* bus = screen->GetBus( it.first );
|
||||
|
||||
if( bus )
|
||||
bus_entry->m_connected_bus_item = bus;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
update_lambda();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, connection_vec.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
update_lambda( connection_vec[ii] );
|
||||
}).wait();
|
||||
}
|
||||
}
|
||||
|
||||
@ -832,13 +816,6 @@ void CONNECTION_GRAPH::buildItemSubGraphs()
|
||||
void CONNECTION_GRAPH::resolveAllDrivers()
|
||||
{
|
||||
// Resolve drivers for subgraphs and propagate connectivity info
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( m_subgraphs.size() + 3 ) / 4 );
|
||||
|
||||
std::atomic<size_t> nextSubgraph( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
std::vector<CONNECTION_SUBGRAPH*> dirty_graphs;
|
||||
|
||||
std::copy_if( m_subgraphs.begin(), m_subgraphs.end(), std::back_inserter( dirty_graphs ),
|
||||
@ -847,61 +824,53 @@ void CONNECTION_GRAPH::resolveAllDrivers()
|
||||
return candidate->m_dirty;
|
||||
} );
|
||||
|
||||
auto update_lambda = [&nextSubgraph, &dirty_graphs]() -> size_t
|
||||
std::vector<std::future<size_t>> returns( dirty_graphs.size() );
|
||||
|
||||
auto update_lambda = []( CONNECTION_SUBGRAPH* subgraph ) -> size_t
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++; subgraphId < dirty_graphs.size(); subgraphId = nextSubgraph++ )
|
||||
if( !subgraph->m_dirty )
|
||||
return 0;
|
||||
|
||||
// Special processing for some items
|
||||
for( auto item : subgraph->m_items )
|
||||
{
|
||||
auto subgraph = dirty_graphs[subgraphId];
|
||||
|
||||
if( !subgraph->m_dirty )
|
||||
continue;
|
||||
|
||||
// Special processing for some items
|
||||
for( auto item : subgraph->m_items )
|
||||
switch( item->Type() )
|
||||
{
|
||||
switch( item->Type() )
|
||||
{
|
||||
case SCH_NO_CONNECT_T:
|
||||
case SCH_NO_CONNECT_T:
|
||||
subgraph->m_no_connect = item;
|
||||
break;
|
||||
|
||||
case SCH_BUS_WIRE_ENTRY_T:
|
||||
subgraph->m_bus_entry = item;
|
||||
break;
|
||||
|
||||
case SCH_PIN_T:
|
||||
{
|
||||
auto pin = static_cast<SCH_PIN*>( item );
|
||||
|
||||
if( pin->GetType() == ELECTRICAL_PINTYPE::PT_NC )
|
||||
subgraph->m_no_connect = item;
|
||||
break;
|
||||
|
||||
case SCH_BUS_WIRE_ENTRY_T:
|
||||
subgraph->m_bus_entry = item;
|
||||
break;
|
||||
|
||||
case SCH_PIN_T:
|
||||
{
|
||||
auto pin = static_cast<SCH_PIN*>( item );
|
||||
|
||||
if( pin->GetType() == ELECTRICAL_PINTYPE::PT_NC )
|
||||
subgraph->m_no_connect = item;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
subgraph->ResolveDrivers( true );
|
||||
subgraph->m_dirty = false;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
subgraph->ResolveDrivers( true );
|
||||
subgraph->m_dirty = false;
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
update_lambda();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, dirty_graphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
update_lambda( dirty_graphs[ii] );
|
||||
}).wait();
|
||||
|
||||
// Now discard any non-driven subgraphs from further consideration
|
||||
|
||||
@ -1434,39 +1403,12 @@ void CONNECTION_GRAPH::buildConnectionGraph()
|
||||
for( CONNECTION_SUBGRAPH* subgraph : m_driver_subgraphs )
|
||||
m_sheet_to_subgraphs_map[ subgraph->m_sheet ].emplace_back( subgraph );
|
||||
|
||||
// Update item connections at this point so that neighbor propagation works
|
||||
std::atomic<size_t> nextSubgraph( 0 );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( m_subgraphs.size() + 3 ) / 4 );
|
||||
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto preliminaryUpdateTask =
|
||||
[&]() -> size_t
|
||||
GetKiCadThreadPool().parallelize_loop( 0, m_driver_subgraphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++;
|
||||
subgraphId < m_driver_subgraphs.size();
|
||||
subgraphId = nextSubgraph++ )
|
||||
{
|
||||
m_driver_subgraphs[subgraphId]->UpdateItemConnections();
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
preliminaryUpdateTask();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, preliminaryUpdateTask );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
m_driver_subgraphs[ii]->UpdateItemConnections();
|
||||
}).wait();
|
||||
|
||||
// Next time through the subgraphs, we do some post-processing to handle things like
|
||||
// connecting bus members to their neighboring subgraphs, and then propagate connections
|
||||
@ -1595,84 +1537,71 @@ void CONNECTION_GRAPH::buildConnectionGraph()
|
||||
}
|
||||
}
|
||||
|
||||
nextSubgraph.store( 0 );
|
||||
|
||||
auto updateItemConnectionsTask =
|
||||
[&]() -> size_t
|
||||
[&]( CONNECTION_SUBGRAPH* subgraph ) -> size_t
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++;
|
||||
subgraphId < m_driver_subgraphs.size();
|
||||
subgraphId = nextSubgraph++ )
|
||||
// Make sure weakly-driven single-pin nets get the unconnected_ prefix
|
||||
if( !subgraph->m_strong_driver && subgraph->m_drivers.size() == 1 &&
|
||||
subgraph->m_driver->Type() == SCH_PIN_T )
|
||||
{
|
||||
CONNECTION_SUBGRAPH* subgraph = m_driver_subgraphs[subgraphId];
|
||||
SCH_PIN* pin = static_cast<SCH_PIN*>( subgraph->m_driver );
|
||||
wxString name = pin->GetDefaultNetName( subgraph->m_sheet, true );
|
||||
|
||||
// Make sure weakly-driven single-pin nets get the unconnected_ prefix
|
||||
if( !subgraph->m_strong_driver && subgraph->m_drivers.size() == 1 &&
|
||||
subgraph->m_driver->Type() == SCH_PIN_T )
|
||||
subgraph->m_driver_connection->ConfigureFromLabel( name );
|
||||
}
|
||||
|
||||
subgraph->m_dirty = false;
|
||||
subgraph->UpdateItemConnections();
|
||||
|
||||
// No other processing to do on buses
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
return 0;
|
||||
|
||||
// As a visual aid, we can check sheet pins that are driven by themselves to see
|
||||
// if they should be promoted to buses
|
||||
|
||||
if( subgraph->m_driver->Type() == SCH_SHEET_PIN_T )
|
||||
{
|
||||
SCH_SHEET_PIN* pin = static_cast<SCH_SHEET_PIN*>( subgraph->m_driver );
|
||||
|
||||
if( SCH_SHEET* sheet = pin->GetParent() )
|
||||
{
|
||||
SCH_PIN* pin = static_cast<SCH_PIN*>( subgraph->m_driver );
|
||||
wxString name = pin->GetDefaultNetName( subgraph->m_sheet, true );
|
||||
wxString pinText = pin->GetText();
|
||||
SCH_SCREEN* screen = sheet->GetScreen();
|
||||
|
||||
subgraph->m_driver_connection->ConfigureFromLabel( name );
|
||||
}
|
||||
|
||||
subgraph->m_dirty = false;
|
||||
subgraph->UpdateItemConnections();
|
||||
|
||||
// No other processing to do on buses
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
continue;
|
||||
|
||||
// As a visual aid, we can check sheet pins that are driven by themselves to see
|
||||
// if they should be promoted to buses
|
||||
|
||||
if( subgraph->m_driver->Type() == SCH_SHEET_PIN_T )
|
||||
{
|
||||
SCH_SHEET_PIN* pin = static_cast<SCH_SHEET_PIN*>( subgraph->m_driver );
|
||||
|
||||
if( SCH_SHEET* sheet = pin->GetParent() )
|
||||
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
|
||||
{
|
||||
wxString pinText = pin->GetText();
|
||||
SCH_SCREEN* screen = sheet->GetScreen();
|
||||
SCH_HIERLABEL* label = static_cast<SCH_HIERLABEL*>( item );
|
||||
|
||||
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
|
||||
if( label->GetText() == pinText )
|
||||
{
|
||||
SCH_HIERLABEL* label = static_cast<SCH_HIERLABEL*>( item );
|
||||
SCH_SHEET_PATH path = subgraph->m_sheet;
|
||||
path.push_back( sheet );
|
||||
|
||||
if( label->GetText() == pinText )
|
||||
{
|
||||
SCH_SHEET_PATH path = subgraph->m_sheet;
|
||||
path.push_back( sheet );
|
||||
SCH_CONNECTION* parent_conn = label->Connection( &path );
|
||||
|
||||
SCH_CONNECTION* parent_conn = label->Connection( &path );
|
||||
if( parent_conn && parent_conn->IsBus() )
|
||||
subgraph->m_driver_connection->SetType( CONNECTION_TYPE::BUS );
|
||||
|
||||
if( parent_conn && parent_conn->IsBus() )
|
||||
subgraph->m_driver_connection->SetType( CONNECTION_TYPE::BUS );
|
||||
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
continue;
|
||||
}
|
||||
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
updateItemConnectionsTask();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, updateItemConnectionsTask );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, m_driver_subgraphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
updateItemConnectionsTask( m_driver_subgraphs[ii] );
|
||||
}).wait();
|
||||
|
||||
m_net_code_to_subgraphs_map.clear();
|
||||
m_net_name_to_subgraphs_map.clear();
|
||||
|
@ -120,6 +120,10 @@ target_link_libraries( gerbview
    ${wxWidgets_LIBRARIES}
    )

target_include_directories( gerbview PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

if( MAKE_LINK_MAPS )
    set_target_properties( gerbview PROPERTIES
        LINK_FLAGS "-Wl,-cref,-Map=gerbview.map" )
@ -43,7 +43,6 @@
|
||||
class FP_LIB_TABLE;
|
||||
class FOOTPRINT_LIST;
|
||||
class FOOTPRINT_LIST_IMPL;
|
||||
class FOOTPRINT_ASYNC_LOADER;
|
||||
class PROGRESS_REPORTER;
|
||||
class wxTopLevelWindow;
|
||||
class KIWAY;
|
||||
@ -268,27 +267,6 @@ public:
|
||||
*/
|
||||
static FOOTPRINT_LIST* GetInstance( KIWAY& aKiway );
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Launch worker threads to load footprints. Part of the #FOOTPRINT_ASYNC_LOADER
|
||||
* implementation.
|
||||
*/
|
||||
virtual void startWorkers( FP_LIB_TABLE* aTable, const wxString* aNickname,
|
||||
FOOTPRINT_ASYNC_LOADER* aLoader, unsigned aNThreads ) = 0;
|
||||
|
||||
/**
|
||||
* Join worker threads. Part of the FOOTPRINT_ASYNC_LOADER implementation.
|
||||
*/
|
||||
virtual bool joinWorkers() = 0;
|
||||
|
||||
/**
|
||||
* Stop worker threads. Part of the FOOTPRINT_ASYNC_LOADER implementation.
|
||||
*/
|
||||
virtual void stopWorkers() = 0;
|
||||
|
||||
private:
|
||||
friend class FOOTPRINT_ASYNC_LOADER;
|
||||
|
||||
protected:
|
||||
FP_LIB_TABLE* m_lib_table; ///< no ownership
|
||||
|
||||
@ -297,75 +275,5 @@ protected:
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Object used to populate a #FOOTPRINT_LIST asynchronously.
|
||||
*
|
||||
* Construct one, calling #Start(), and then waiting until it reports completion. This is
|
||||
* equivalent to calling #FOOTPRINT_LIST::ReadFootprintFiles().
|
||||
*/
|
||||
class APIEXPORT FOOTPRINT_ASYNC_LOADER
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Construct an asynchronous loader.
|
||||
*/
|
||||
FOOTPRINT_ASYNC_LOADER();
|
||||
|
||||
~FOOTPRINT_ASYNC_LOADER();
|
||||
|
||||
/**
|
||||
* Assign a FOOTPRINT_LIST to the loader. This does not take ownership of
|
||||
* the list.
|
||||
*/
|
||||
void SetList( FOOTPRINT_LIST* aList );
|
||||
|
||||
/**
|
||||
* Launch the worker threads.
|
||||
*
|
||||
* @param aTable defines all the libraries.
|
||||
* @param aNickname is the library to read from, or if NULL means read all footprints from
|
||||
* all known libraries in \a aTable.
|
||||
* @param aNThreads is the number of worker threads.
|
||||
*/
|
||||
void Start( FP_LIB_TABLE* aTable, const wxString* aNickname = nullptr,
|
||||
unsigned aNThreads = DEFAULT_THREADS );
|
||||
|
||||
/**
|
||||
* Wait until the worker threads are finished, and then perform any required
|
||||
* single-threaded finishing on the list. This must be called before using
|
||||
* the list, even if the completion callback was used!
|
||||
*
|
||||
* It is safe to call this method from a thread, but it is not safe to use
|
||||
* the list from ANY thread until it completes. It is recommended to call
|
||||
* this from the main thread because of this.
|
||||
*
|
||||
* It is safe to call this multiple times, but after the first it will
|
||||
* always return true.
|
||||
*
|
||||
* @return true if no errors occurred
|
||||
*/
|
||||
bool Join();
|
||||
|
||||
/**
|
||||
* Safely stop the current process.
|
||||
*/
|
||||
void Abort();
|
||||
|
||||
private:
|
||||
/**
|
||||
* Default number of worker threads. Determined empirically (by dickelbeck):
|
||||
* More than 6 is not significantly faster, less than 6 is likely slower.
|
||||
*/
|
||||
static constexpr unsigned DEFAULT_THREADS = 6;
|
||||
|
||||
friend class FOOTPRINT_LIST;
|
||||
friend class FOOTPRINT_LIST_IMPL;
|
||||
|
||||
FOOTPRINT_LIST* m_list;
|
||||
std::string m_last_table;
|
||||
|
||||
int m_total_libs;
|
||||
};
|
||||
|
||||
|
||||
#endif // FOOTPRINT_INFO_H_
|
||||
|
@ -176,4 +176,21 @@ public:
    }
};

/**
 * RAII class to safely set/reset nil KIIDs for use in footprint/symbol loading
 */
class KIID_NIL_SET_RESET
{
public:
    KIID_NIL_SET_RESET()
    {
        KIID::CreateNilUuids( true );
    };

    ~KIID_NIL_SET_RESET()
    {
        KIID::CreateNilUuids( false );
    }
};

#endif // KIID_H
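A hypothetical usage sketch of the RAII helper added above (not part of the commit): constructing the object disables KIID generation for the enclosing scope, and the destructor restores it even on an early return or exception. The function name is illustrative only.

```cpp
void loadLibraryFootprints()
{
    // Nil UUIDs are generated for the whole scope of this loader.
    KIID_NIL_SET_RESET nilUuids;

    // ... parse library parts here; KIID::CreateNilUuids( false ) runs automatically
    // when nilUuids goes out of scope, even if an exception propagates.
}
```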
42 include/thread_pool.h Normal file
@ -0,0 +1,42 @@
/*
 * This program source code file is part of KiCad, a free EDA CAD application.
 *
 * Copyright (C) 2022 KiCad Developers, see CHANGELOG.TXT for contributors.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 3
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you may find one here:
 * http://www.gnu.org/licenses/gpl-3.0.html
 * or you may search the http://www.gnu.org website for the version 3 license,
 * or you may write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 */

#pragma once
#ifndef INCLUDE_THREAD_POOL_H_
#define INCLUDE_THREAD_POOL_H_

#include <bs_thread_pool.hpp>

using thread_pool = BS::thread_pool;

/**
 * Get a reference to the current thread pool. N.B., you cannot copy the thread pool
 * so if you accidentally write thread_pool tp = GetKiCadThreadPool(), you will break
 * your compilation
 *
 * @return Reference to the current (potentially newly constructed) thread pool
 */
thread_pool& GetKiCadThreadPool();


#endif /* INCLUDE_THREAD_POOL_H_ */
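A minimal sketch (not part of the commit) of how the accessor above is used with the parallelize_loop() idiom that this commit introduces elsewhere in the diff. The function and variable names are hypothetical; note that only a reference is taken, since the pool cannot be copied.

```cpp
#include <thread_pool.h>
#include <vector>

void scaleAll( std::vector<double>& aValues, double aFactor )
{
    thread_pool& tp = GetKiCadThreadPool();   // reference only; the pool is non-copyable

    // Split the index range into blocks and run them on the pool's workers,
    // mirroring the parallelize_loop() calls added in this commit.
    tp.parallelize_loop( 0, aValues.size(),
            [&]( const size_t a, const size_t b )
            {
                for( size_t ii = a; ii < b; ++ii )
                    aValues[ii] *= aFactor;
            } ).wait();
}
```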
@ -94,6 +94,10 @@ endif()

target_link_libraries( kicad pcm )

target_include_directories( kicad PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

install( TARGETS kicad
    DESTINATION ${KICAD_BIN}
    COMPONENT binary
@ -116,6 +116,11 @@ set_target_properties( pl_editor_kiface PROPERTIES
    PREFIX ${KIFACE_PREFIX}
    SUFFIX ${KIFACE_SUFFIX}
    )

target_include_directories( pl_editor_kiface PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

set_source_files_properties( pl_editor.cpp PROPERTIES
    # The KIFACE is in pcbnew.cpp, export it:
    COMPILE_DEFINITIONS "BUILD_KIWAY_DLL;COMPILING_DLL"
@ -666,6 +666,10 @@ target_link_libraries( pcbnew_kiface_objects
    nlohmann_json
    )

target_include_directories( pcbnew_kiface_objects PRIVATE
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    )

if( KICAD_STEP_EXPORT_LIB )
    add_definitions( -DKICAD_STEP_EXPORT_LIB )
endif()
@ -720,7 +724,7 @@ target_link_libraries( pcbnew_kiface
    PRIVATE
        ${PCBNEW_KIFACE_LIBRARIES}
    )


set_source_files_properties( pcbnew.cpp PROPERTIES
    # The KIFACE is in pcbnew.cpp, export it:
    COMPILE_DEFINITIONS "BUILD_KIWAY_DLL;COMPILING_DLL"
@ -26,37 +26,39 @@
|
||||
*/
|
||||
|
||||
#include <iterator>
|
||||
#include <thread>
|
||||
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <drc/drc_rtree.h>
|
||||
#include <pcb_base_frame.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <reporter.h>
|
||||
#include <board_commit.h>
|
||||
#include <board.h>
|
||||
#include <core/arraydim.h>
|
||||
#include <core/kicad_algo.h>
|
||||
#include <connectivity/connectivity_data.h>
|
||||
#include <convert_shape_list_to_polygon.h>
|
||||
#include <footprint.h>
|
||||
#include <pcb_base_frame.h>
|
||||
#include <pcb_track.h>
|
||||
#include <zone.h>
|
||||
#include <pcb_marker.h>
|
||||
#include <pcb_group.h>
|
||||
#include <pcb_target.h>
|
||||
#include <pcb_shape.h>
|
||||
#include <pcb_text.h>
|
||||
#include <pcb_textbox.h>
|
||||
#include <core/arraydim.h>
|
||||
#include <core/kicad_algo.h>
|
||||
#include <connectivity/connectivity_data.h>
|
||||
#include <string_utils.h>
|
||||
#include <pgm_base.h>
|
||||
#include <pcbnew_settings.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <project.h>
|
||||
#include <project/net_settings.h>
|
||||
#include <project/project_file.h>
|
||||
#include <project/project_local_settings.h>
|
||||
#include <ratsnest/ratsnest_data.h>
|
||||
#include <reporter.h>
|
||||
#include <tool/selection_conditions.h>
|
||||
#include <convert_shape_list_to_polygon.h>
|
||||
#include <wx/log.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <string_utils.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
// This is an odd place for this, but CvPcb won't link if it's in board_item.cpp like I first
|
||||
// tried it.
|
||||
@ -639,40 +641,40 @@ void BOARD::CacheTriangulation( PROGRESS_REPORTER* aReporter, const std::vector<
|
||||
if( aReporter )
|
||||
aReporter->Report( _( "Tessellating copper zones..." ) );
|
||||
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> zones_done( 0 );
|
||||
std::atomic<size_t> threads_done( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ &zones, &zones_done, &threads_done, &next ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < zones.size(); i = next.fetch_add( 1 ) )
|
||||
{
|
||||
zones[i]->CacheTriangulation();
|
||||
zones_done.fetch_add( 1 );
|
||||
}
|
||||
returns.reserve( zones.size() );
|
||||
|
||||
threads_done.fetch_add( 1 );
|
||||
} );
|
||||
auto cache_zones = [aReporter]( ZONE* aZone ) -> size_t
|
||||
{
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
t.detach();
|
||||
}
|
||||
aZone->CacheTriangulation();
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( ZONE* zone : zones )
|
||||
returns.emplace_back( tp.submit( cache_zones, zone ) );
|
||||
|
||||
// Finalize the triangulation threads
|
||||
while( threads_done < parallelThreadCount )
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
if( aReporter )
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
aReporter->SetCurrentProgress( (double) zones_done / (double) zones.size() );
|
||||
aReporter->KeepRefreshing();
|
||||
}
|
||||
if( aReporter )
|
||||
aReporter->KeepRefreshing();
|
||||
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -4,6 +4,7 @@ include_directories(
    ./
    ../
    ../../include
    $<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
    ${INC_AFTER}
    )

@ -18,4 +19,5 @@ add_library( connectivity STATIC ${PCBNEW_CONN_SRCS} )

target_link_libraries( connectivity PRIVATE
    common
    threadpool
    )
@ -24,18 +24,19 @@
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
#include <mutex>
|
||||
|
||||
#include <connectivity/connectivity_algo.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <geometry/geometry_utils.h>
|
||||
#include <board_commit.h>
|
||||
#include <thread_pool.h>
|
||||
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <thread>
|
||||
#include <mutex>
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
|
||||
#ifdef PROFILE
|
||||
#include <profile.h>
|
||||
#endif
|
||||
@ -221,6 +222,7 @@ void CN_CONNECTIVITY_ALGO::searchConnections()
|
||||
PROF_COUNTER search_basic( "search-basic" );
|
||||
#endif
|
||||
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<CN_ITEM*> dirtyItems;
|
||||
std::copy_if( m_itemList.begin(), m_itemList.end(), std::back_inserter( dirtyItems ),
|
||||
[] ( CN_ITEM* aItem )
|
||||
@ -238,57 +240,39 @@ void CN_CONNECTIVITY_ALGO::searchConnections()
|
||||
|
||||
if( m_itemList.IsDirty() )
|
||||
{
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( dirtyItems.size() + 7 ) / 8 );
|
||||
|
||||
std::atomic<size_t> nextItem( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
std::vector<std::future<size_t>> returns( dirtyItems.size() );
|
||||
|
||||
auto conn_lambda =
|
||||
[&nextItem, &dirtyItems]( CN_LIST* aItemList,
|
||||
[&dirtyItems]( size_t aItem, CN_LIST* aItemList,
|
||||
PROGRESS_REPORTER* aReporter) -> size_t
|
||||
{
|
||||
for( size_t i = nextItem++; i < dirtyItems.size(); i = nextItem++ )
|
||||
{
|
||||
CN_VISITOR visitor( dirtyItems[i] );
|
||||
aItemList->FindNearby( dirtyItems[i], visitor );
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
if( aReporter )
|
||||
{
|
||||
if( aReporter->IsCancelled() )
|
||||
break;
|
||||
else
|
||||
aReporter->AdvanceProgress();
|
||||
}
|
||||
}
|
||||
CN_VISITOR visitor( dirtyItems[aItem] );
|
||||
aItemList->FindNearby( dirtyItems[aItem], visitor );
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount <= 1 )
|
||||
{
|
||||
conn_lambda( &m_itemList, m_progressReporter );
|
||||
}
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
returns[ii] = std::async( std::launch::async, conn_lambda, &m_itemList,
|
||||
m_progressReporter );
|
||||
}
|
||||
for( size_t ii = 0; ii < dirtyItems.size(); ++ii )
|
||||
returns[ii] = tp.submit( conn_lambda, ii, &m_itemList, m_progressReporter );
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
// Here we balance returns with a 100ms timeout to allow UI updating
|
||||
std::future_status status;
|
||||
do
|
||||
{
|
||||
// Here we balance returns with a 100ms timeout to allow UI updating
|
||||
std::future_status status;
|
||||
do
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
|
||||
status = returns[ii].wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
if( m_progressReporter )
|
||||
@ -470,37 +454,37 @@ void CN_CONNECTIVITY_ALGO::Build( BOARD* aBoard, PROGRESS_REPORTER* aReporter )
|
||||
|
||||
// Generate RTrees for CN_ZONE_LAYER items (in parallel)
|
||||
//
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> zitems_done( 0 );
|
||||
std::atomic<size_t> threads_done( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns( zitems.size() );
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
auto cache_zones = [aReporter]( CN_ZONE_LAYER* aZone ) -> size_t
|
||||
{
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
aZone->BuildRTree();
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( size_t ii = 0; ii < zitems.size(); ++ii )
|
||||
returns[ii] = tp.submit( cache_zones, zitems[ii] );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ &zitems, &zitems_done, &threads_done, &next ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < zitems.size(); i = next.fetch_add( 1 ) )
|
||||
{
|
||||
zitems[i]->BuildRTree();
|
||||
zitems_done.fetch_add( 1 );
|
||||
}
|
||||
std::future_status status;
|
||||
|
||||
threads_done.fetch_add( 1 );
|
||||
} );
|
||||
|
||||
t.detach();
|
||||
}
|
||||
|
||||
while( threads_done < parallelThreadCount )
|
||||
{
|
||||
if( aReporter )
|
||||
do
|
||||
{
|
||||
aReporter->SetCurrentProgress( zitems_done / size );
|
||||
aReporter->KeepRefreshing();
|
||||
}
|
||||
if( aReporter )
|
||||
aReporter->KeepRefreshing();
|
||||
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
}
|
||||
|
||||
// Add CN_ZONE_LAYERS, tracks, and pads to connectivity
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include <profile.h>
|
||||
#endif
|
||||
|
||||
#include <thread>
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
#include <initializer_list>
|
||||
@ -40,6 +39,7 @@
|
||||
#include <geometry/shape_circle.h>
|
||||
#include <ratsnest/ratsnest_data.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <thread_pool.h>
|
||||
#include <trigo.h>
|
||||
#include <drc/drc_rtree.h>
|
||||
|
||||
@ -169,35 +169,12 @@ void CONNECTIVITY_DATA::updateRatsnest()
|
||||
return aNet->IsDirty() && aNet->GetNodeCount() > 0;
|
||||
} );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( dirty_nets.size() + 7 ) / 8 );
|
||||
|
||||
std::atomic<size_t> nextNet( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto update_lambda =
|
||||
[this, &nextNet, &dirty_nets]() -> size_t
|
||||
GetKiCadThreadPool().parallelize_loop( 0, dirty_nets.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( size_t i = nextNet++; i < dirty_nets.size(); i = nextNet++ )
|
||||
dirty_nets[i]->Update( m_exclusions );
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount <= 1 )
|
||||
{
|
||||
update_lambda();
|
||||
}
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the ratsnest threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
dirty_nets[ii]->Update( m_exclusions );
|
||||
}).wait();
|
||||
|
||||
#ifdef PROFILE
|
||||
rnUpdate.Show();
|
||||
|
@ -22,10 +22,9 @@
|
||||
*/
|
||||
|
||||
#include <common.h>
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <board_design_settings.h>
|
||||
#include <footprint.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
#include <drc/drc_engine.h>
|
||||
@ -149,58 +148,60 @@ bool DRC_CACHE_GENERATOR::Run()
|
||||
|
||||
// Cache zone bounding boxes, triangulation, copper zone rtrees, and footprint courtyards
|
||||
// before we start.
|
||||
//
|
||||
|
||||
m_drcEngine->SetMaxProgress( allZones.size() );
|
||||
|
||||
for( FOOTPRINT* footprint : m_board->Footprints() )
|
||||
footprint->BuildPolyCourtyards();
|
||||
|
||||
count = allZones.size();
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 0 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
|
||||
for( ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ this, &allZones, &done, &threads_finished, &next, count ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < count; i = next.fetch_add( 1 ) )
|
||||
{
|
||||
ZONE* zone = allZones[ i ];
|
||||
for( ZONE* zone : footprint->Zones() )
|
||||
allZones.push_back( zone );
|
||||
|
||||
zone->CacheBoundingBox();
|
||||
zone->CacheTriangulation();
|
||||
|
||||
if( !zone->GetIsRuleArea() && zone->IsOnCopperLayer() )
|
||||
{
|
||||
std::unique_ptr<DRC_RTREE> rtree = std::make_unique<DRC_RTREE>();
|
||||
|
||||
for( PCB_LAYER_ID layer : zone->GetLayerSet().Seq() )
|
||||
{
|
||||
if( IsCopperLayer( layer ) )
|
||||
rtree->Insert( zone, layer );
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> cacheLock( m_board->m_CachesMutex );
|
||||
m_board->m_CopperZoneRTreeCache[ zone ] = std::move( rtree );
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
|
||||
done.fetch_add( 1 );
|
||||
}
|
||||
|
||||
threads_finished.fetch_add( 1 );
|
||||
} );
|
||||
|
||||
t.detach();
|
||||
footprint->BuildPolyCourtyards();
|
||||
}
|
||||
|
||||
while( threads_finished < parallelThreadCount )
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
returns.reserve( allZones.size() );
|
||||
|
||||
auto cache_zones = [this]( ZONE* aZone ) -> size_t
|
||||
{
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
aZone->CacheBoundingBox();
|
||||
aZone->CacheTriangulation();
|
||||
|
||||
if( !aZone->GetIsRuleArea() && aZone->IsOnCopperLayer() )
|
||||
{
|
||||
std::unique_ptr<DRC_RTREE> rtree = std::make_unique<DRC_RTREE>();
|
||||
|
||||
for( PCB_LAYER_ID layer : aZone->GetLayerSet().Seq() )
|
||||
{
|
||||
if( IsCopperLayer( layer ) )
|
||||
rtree->Insert( aZone, layer );
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> cacheLock( m_board->m_CachesMutex );
|
||||
m_board->m_CopperZoneRTreeCache[ aZone ] = std::move( rtree );
|
||||
m_drcEngine->AdvanceProgress();
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( ZONE* zone : allZones )
|
||||
returns.emplace_back( tp.submit( cache_zones, zone ) );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
reportProgress( done, count, 1 );
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
m_drcEngine->KeepRefreshing();
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
return !m_drcEngine->IsCancelled();
|
||||
|
@ -24,7 +24,6 @@
|
||||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <reporter.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <string_utils.h>
|
||||
@ -40,6 +39,7 @@
|
||||
#include <footprint.h>
|
||||
#include <pad.h>
|
||||
#include <pcb_track.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
|
||||
@ -1514,6 +1514,29 @@ void DRC_ENGINE::ReportAux ( const wxString& aStr )
|
||||
}
|
||||
|
||||
|
||||
bool DRC_ENGINE::KeepRefreshing( bool aWait )
|
||||
{
|
||||
if( !m_progressReporter )
|
||||
return true;
|
||||
|
||||
return m_progressReporter->KeepRefreshing( aWait );
|
||||
}
|
||||
|
||||
|
||||
void DRC_ENGINE::AdvanceProgress()
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->AdvanceProgress();
|
||||
}
|
||||
|
||||
|
||||
void DRC_ENGINE::SetMaxProgress( int aSize )
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->SetMaxProgress( aSize );
|
||||
}
|
||||
|
||||
|
||||
bool DRC_ENGINE::ReportProgress( double aProgress )
|
||||
{
|
||||
if( !m_progressReporter )
|
||||
|
@ -167,6 +167,9 @@ public:
|
||||
void ReportViolation( const std::shared_ptr<DRC_ITEM>& aItem, const VECTOR2I& aPos,
|
||||
PCB_LAYER_ID aMarkerLayer );
|
||||
|
||||
bool KeepRefreshing( bool aWait = false );
|
||||
void AdvanceProgress();
|
||||
void SetMaxProgress( int aSize );
|
||||
bool ReportProgress( double aProgress );
|
||||
bool ReportPhase( const wxString& aMessage );
|
||||
void ReportAux( const wxString& aStr );
|
||||
|
@ -22,7 +22,6 @@
|
||||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <common.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <drc/drc_rtree.h>
|
||||
@ -31,6 +30,8 @@
|
||||
#include <drc/drc_rule.h>
|
||||
#include <drc/drc_test_provider.h>
|
||||
#include <pad.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
|
||||
@ -106,86 +107,92 @@ bool DRC_TEST_PROVIDER_DISALLOW::Run()
|
||||
}
|
||||
}
|
||||
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 0 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
PROGRESS_REPORTER* reporter = m_drcEngine->GetProgressReporter();
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[&]()
|
||||
auto query_areas = [&]( std::pair<ZONE*, ZONE*> aCache ) -> size_t
|
||||
{
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
ZONE* ruleArea = aCache.first;
|
||||
ZONE* copperZone = aCache.second;
|
||||
EDA_RECT areaBBox = ruleArea->GetCachedBoundingBox();
|
||||
EDA_RECT copperBBox = copperZone->GetCachedBoundingBox();
|
||||
bool isInside = false;
|
||||
|
||||
if( copperZone->IsFilled() && areaBBox.Intersects( copperBBox ) )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < toCache.size(); i = next.fetch_add( 1 ) )
|
||||
// Collisions include touching, so we need to deflate outline by
|
||||
// enough to exclude it. This is particularly important for detecting
|
||||
// copper fills as they will be exactly touching along the entire
|
||||
// exclusion border.
|
||||
SHAPE_POLY_SET areaPoly = ruleArea->Outline()->CloneDropTriangulation();
|
||||
areaPoly.Deflate( epsilon, 0, SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
|
DRC_RTREE* zoneRTree = board->m_CopperZoneRTreeCache[ copperZone ].get();

if( zoneRTree )
{
ZONE* ruleArea = toCache[i].first;
ZONE* copperZone = toCache[i].second;
EDA_RECT areaBBox = ruleArea->GetCachedBoundingBox();
EDA_RECT copperBBox = copperZone->GetCachedBoundingBox();
bool isInside = false;

if( copperZone->IsFilled() && areaBBox.Intersects( copperBBox ) )
for( PCB_LAYER_ID layer : ruleArea->GetLayerSet().Seq() )
{
// Collisions include touching, so we need to deflate outline by
// enough to exclude it. This is particularly important for detecting
// copper fills as they will be exactly touching along the entire
// exclusion border.
SHAPE_POLY_SET areaPoly = ruleArea->Outline()->CloneDropTriangulation();
areaPoly.Deflate( epsilon, 0, SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );

DRC_RTREE* zoneRTree = board->m_CopperZoneRTreeCache[ copperZone ].get();

if( zoneRTree )
if( zoneRTree->QueryColliding( areaBBox, &areaPoly, layer ) )
{
for( PCB_LAYER_ID layer : ruleArea->GetLayerSet().Seq() )
{
if( zoneRTree->QueryColliding( areaBBox, &areaPoly, layer ) )
{
isInside = true;
break;
}

if( m_drcEngine->IsCancelled() )
break;
}
isInside = true;
return 0;
}

if( m_drcEngine->IsCancelled() )
return 0;
}

if( m_drcEngine->IsCancelled() )
break;

std::tuple<BOARD_ITEM*, BOARD_ITEM*, PCB_LAYER_ID> key( ruleArea,
copperZone,
UNDEFINED_LAYER );

{
std::unique_lock<std::mutex> cacheLock( board->m_CachesMutex );
board->m_InsideAreaCache[ key ] = isInside;
}

done.fetch_add( 1 );
}
}

threads_finished.fetch_add( 1 );
} );
if( m_drcEngine->IsCancelled() )
return 0;

t.detach();
}
std::tuple<BOARD_ITEM*, BOARD_ITEM*, PCB_LAYER_ID> key( ruleArea,
copperZone,
UNDEFINED_LAYER );

while( threads_finished < parallelThreadCount )
{
std::unique_lock<std::mutex> cacheLock( board->m_CachesMutex );
board->m_InsideAreaCache[ key ] = isInside;
}

m_drcEngine->AdvanceProgress();

return 1;
};

thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<size_t>> returns;

returns.reserve( toCache.size() );

for( auto& cache : toCache )
returns.emplace_back( tp.submit( query_areas, cache ) );

for( auto& retval : returns )
{
m_drcEngine->ReportProgress( (double) done / (double) totalCount );
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
std::future_status status;

do
{
if( reporter )
reporter->KeepRefreshing();

status = retval.wait_for( std::chrono::milliseconds( 100 ) );
} while( status != std::future_status::ready );
}

if( m_drcEngine->IsCancelled() )
return false;

// Now go through all the board objects calling the DRC_ENGINE to run the actual dissallow
// Now go through all the board objects calling the DRC_ENGINE to run the actual disallow
// tests. These should be reasonably quick using the caches generated above.
//
int delta = 100;
int ii = done;
int ii = static_cast<int>( toCache.size() );

auto checkTextOnEdgeCuts =
[&]( BOARD_ITEM* item )
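
The hunk above replaces hand-rolled std::thread workers with jobs submitted to the shared pool, and then polls each returned future so progress reporting and cancellation stay responsive. Below is a minimal, self-contained sketch of that submit-then-poll pattern using only the standard library; std::async stands in for the pool's submit(), and isInsideArea() plus the progress printout are placeholders rather than KiCad API.

#include <chrono>
#include <future>
#include <iostream>
#include <utility>
#include <vector>

// Placeholder for the real per-pair query (rule area vs. copper zone).
static bool isInsideArea( std::pair<int, int> aPair )
{
    return ( aPair.first + aPair.second ) % 2 == 0;
}

int main()
{
    std::vector<std::pair<int, int>> toCache{ { 1, 2 }, { 3, 4 }, { 5, 6 } };
    std::vector<std::future<bool>>   returns;
    returns.reserve( toCache.size() );

    // One job per work item; the pool (here std::async) decides how many run at once.
    for( const auto& cache : toCache )
        returns.emplace_back( std::async( std::launch::async, isInsideArea, cache ) );

    size_t done = 0;

    for( auto& retval : returns )
    {
        // Poll instead of blocking so a UI progress reporter could be refreshed here.
        while( retval.wait_for( std::chrono::milliseconds( 100 ) ) != std::future_status::ready )
            std::cout << "still working: " << done << "/" << returns.size() << " finished\n";

        retval.get();
        ++done;
    }

    std::cout << done << " cache entries computed\n";
    return 0;
}
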
@ -22,7 +22,6 @@
*/

#include <atomic>
#include <thread>
#include <board.h>
#include <board_design_settings.h>
#include <zone.h>
@ -33,6 +32,8 @@
#include <drc/drc_item.h>
#include <drc/drc_test_provider.h>
#include <advanced_config.h>
#include <progress_reporter.h>
#include <thread_pool.h>

/*
Checks for slivers in copper layers
@ -92,6 +93,7 @@ bool DRC_TEST_PROVIDER_SLIVER_CHECKER::Run()

// Report progress on board zones only. Everything else is in the noise.
int zoneLayerCount = 0;
std::atomic<size_t> done( 1 );

for( PCB_LAYER_ID layer : copperLayers )
{
@ -102,86 +104,91 @@ bool DRC_TEST_PROVIDER_SLIVER_CHECKER::Run()
}
}

// The first completion may be a long time coming, so this one gets us started.
zoneLayerCount++;
PROGRESS_REPORTER* reporter = m_drcEngine->GetProgressReporter();

if( !m_drcEngine->ReportProgress( 1.0 / (double) zoneLayerCount ) )
if( reporter && reporter->IsCancelled() )
return false; // DRC cancelled

std::vector<SHAPE_POLY_SET> layerPolys;
layerPolys.resize( layerCount );
std::vector<SHAPE_POLY_SET> layerPolys( layerCount );

std::atomic<size_t> next( 0 );
std::atomic<size_t> done( 1 );
std::atomic<size_t> threads_finished( 0 );
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );

for( size_t ii = 0; ii < parallelThreadCount; ++ii )
{
std::thread t = std::thread(
[&]( )
auto sliver_checker =
[&]( int aItem ) -> size_t
{
for( int i = next.fetch_add( 1 ); i < layerCount; i = next.fetch_add( 1 ) )
{
PCB_LAYER_ID layer = copperLayers[i];
SHAPE_POLY_SET& poly = layerPolys[i];
SHAPE_POLY_SET fill;
PCB_LAYER_ID layer = copperLayers[aItem];
SHAPE_POLY_SET& poly = layerPolys[aItem];

forEachGeometryItem( s_allBasicItems, LSET().set( layer ),
[&]( BOARD_ITEM* item ) -> bool
if( m_drcEngine->IsCancelled() )
return 0;

SHAPE_POLY_SET fill;

forEachGeometryItem( s_allBasicItems, LSET().set( layer ),
[&]( BOARD_ITEM* item ) -> bool
{
if( dynamic_cast<ZONE*>( item) )
{
if( dynamic_cast<ZONE*>( item) )
ZONE* zone = static_cast<ZONE*>( item );

if( !zone->GetIsRuleArea() )
{
ZONE* zone = static_cast<ZONE*>( item );
fill = zone->GetFill( layer )->CloneDropTriangulation();
fill.Unfracture( SHAPE_POLY_SET::PM_FAST );

if( !zone->GetIsRuleArea() )
{
fill = zone->GetFill( layer )->CloneDropTriangulation();
fill.Unfracture( SHAPE_POLY_SET::PM_FAST );
for( int jj = 0; jj < fill.OutlineCount(); ++jj )
poly.AddOutline( fill.Outline( jj ) );

for( int jj = 0; jj < fill.OutlineCount(); ++jj )
poly.AddOutline( fill.Outline( jj ) );

// Report progress on board zones only. Everything
// else is in the noise.
done.fetch_add( 1 );
}
}
else
{
item->TransformShapeWithClearanceToPolygon( poly, layer, 0,
ARC_LOW_DEF,
ERROR_OUTSIDE );
// Report progress on board zones only. Everything
// else is in the noise.
done.fetch_add( 1 );
}
}
else
{
item->TransformShapeWithClearanceToPolygon( poly, layer, 0,
ARC_LOW_DEF,
ERROR_OUTSIDE );
}

if( m_drcEngine->IsCancelled() )
return false;
if( m_drcEngine->IsCancelled() )
return false;

return true;
} );
return true;
} );

poly.Simplify( SHAPE_POLY_SET::PM_FAST );

// Sharpen corners
poly.Deflate( widthTolerance / 2, ARC_LOW_DEF,
SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
if( m_drcEngine->IsCancelled() )
return 0;

if( m_drcEngine->IsCancelled() )
break;
}
poly.Simplify( SHAPE_POLY_SET::PM_FAST );

threads_finished.fetch_add( 1 );
} );
// Sharpen corners
poly.Deflate( widthTolerance / 2, ARC_LOW_DEF,
SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );

t.detach();
}
return 1;
};

while( threads_finished < parallelThreadCount )
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<size_t>> returns;

returns.reserve( copperLayers.size() );

for( size_t ii = 0; ii < copperLayers.size(); ++ii )
returns.emplace_back( tp.submit( sliver_checker, ii ) );

for( auto& retval : returns )
{
m_drcEngine->ReportProgress( (double) done / (double) zoneLayerCount );
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
std::future_status status;

do
{
m_drcEngine->ReportProgress( static_cast<double>( zoneLayerCount ) / done );

status = retval.wait_for( std::chrono::milliseconds( 100 ) );
} while( status != std::future_status::ready );
}


for( int ii = 0; ii < layerCount; ++ii )
{
PCB_LAYER_ID layer = copperLayers[ii];
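
For contrast with the per-layer jobs introduced above, the block that this hunk removes used a fixed set of detached threads that claimed work by bumping a shared atomic index. The following is a minimal, self-contained sketch of that claim-by-atomic-index pattern; the layer names and the fake per-layer work are invented for illustration only.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

int main()
{
    std::vector<std::string> layers{ "F.Cu", "In1.Cu", "In2.Cu", "B.Cu" };
    std::atomic<size_t>      next( 0 );
    std::atomic<size_t>      done( 0 );

    auto worker = [&]()
    {
        // Each thread repeatedly claims the next unprocessed index until none are left.
        for( size_t i = next.fetch_add( 1 ); i < layers.size(); i = next.fetch_add( 1 ) )
        {
            // Real code would build and deflate the layer polygon here.
            done.fetch_add( 1 );
        }
    };

    size_t threadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
    std::vector<std::thread> threads;

    for( size_t ii = 0; ii < threadCount; ++ii )
        threads.emplace_back( worker );

    // Join rather than detach; the detached threads in the old code needed a separate
    // "threads_finished" counter to know when everything was done.
    for( auto& t : threads )
        t.join();

    std::cout << done << " of " << layers.size() << " layers processed\n";
    return 0;
}
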
@ -21,8 +21,6 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
*/

#include <atomic>
#include <thread>
#include <board.h>
#include <board_design_settings.h>
#include <connectivity/connectivity_data.h>
@ -30,6 +28,8 @@
#include <footprint.h>
#include <pad.h>
#include <pcb_track.h>
#include <thread_pool.h>

#include <geometry/shape_line_chain.h>
#include <geometry/shape_poly_set.h>
#include <drc/drc_rule.h>
@ -193,36 +193,33 @@ bool DRC_TEST_PROVIDER_ZONE_CONNECTIONS::Run()
zoneLayers.push_back( { zone, layer } );
}

int zoneLayerCount = zoneLayers.size();
std::atomic<size_t> next( 0 );
std::atomic<size_t> done( 1 );
std::atomic<size_t> threads_finished( 0 );
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
thread_pool& tp = GetKiCadThreadPool();
std::vector<std::future<int>> returns;

for( size_t ii = 0; ii < parallelThreadCount; ++ii )
returns.reserve( zoneLayers.size() );

for( auto& zonelayer : zoneLayers )
{
std::thread t = std::thread(
[&]( )
returns.emplace_back( tp.submit([&]( ZONE* aZone, PCB_LAYER_ID aLayer ) -> int
{
for( int i = next.fetch_add( 1 ); i < zoneLayerCount; i = next.fetch_add( 1 ) )
if( !m_drcEngine->IsCancelled() )
{
testZoneLayer( zoneLayers[i].first, zoneLayers[i].second );
done.fetch_add( 1 );

if( m_drcEngine->IsCancelled() )
break;
testZoneLayer( aZone, aLayer );
m_drcEngine->AdvanceProgress();
}

threads_finished.fetch_add( 1 );
} );

t.detach();
return 0;
}, zonelayer.first, zonelayer.second ) );
}

while( threads_finished < parallelThreadCount )
for( auto& retval : returns )
{
m_drcEngine->ReportProgress( (double) done / (double) zoneLayerCount );
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
std::future_status status;

do
{
m_drcEngine->KeepRefreshing();
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
} while( status != std::future_status::ready );
}

return !m_drcEngine->IsCancelled();
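
The zone-connection hunk submits one job per (zone, layer) pair and hands the pair to the callable as arguments instead of sharing an index between workers. Here is a minimal standard-library sketch of that argument-passing style; std::async stands in for the pool's submit(), and the ZoneLayer struct and testZoneLayer() are illustrative placeholders.

#include <future>
#include <iostream>
#include <string>
#include <vector>

struct ZoneLayer
{
    std::string zone;
    std::string layer;
};

// Placeholder for the real connectivity test on one zone/layer pair.
static int testZoneLayer( std::string aZone, std::string aLayer )
{
    return static_cast<int>( aZone.size() + aLayer.size() );
}

int main()
{
    std::vector<ZoneLayer>        zoneLayers{ { "GND", "F.Cu" }, { "GND", "B.Cu" } };
    std::vector<std::future<int>> returns;
    returns.reserve( zoneLayers.size() );

    // Arguments are forwarded to the callable, so the job does not capture loop variables.
    for( const ZoneLayer& zl : zoneLayers )
        returns.emplace_back( std::async( std::launch::async, testZoneLayer,
                                          zl.zone, zl.layer ) );

    int checked = 0;

    for( auto& retval : returns )
    {
        retval.get();   // a UI loop could use wait_for() here, as the diff does
        ++checked;
    }

    std::cout << checked << " zone/layer pairs checked\n";
    return 0;
}
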
@ -22,22 +22,22 @@

#include <footprint_info_impl.h>

#include <dialogs/html_message_box.h>
#include <footprint.h>
#include <footprint_info.h>
#include <fp_lib_table.h>
#include <dialogs/html_message_box.h>
#include <string_utils.h>
#include <locale_io.h>
#include <kiway.h>
#include <locale_io.h>
#include <lib_id.h>
#include <wildcards_and_files_ext.h>
#include <progress_reporter.h>
#include <string_utils.h>
#include <thread_pool.h>
#include <wildcards_and_files_ext.h>

#include <wx/textfile.h>
#include <wx/txtstrm.h>
#include <wx/wfstream.h>

#include <thread>


void FOOTPRINT_INFO_IMPL::load()
{
@ -95,26 +95,6 @@ bool FOOTPRINT_LIST_IMPL::CatchErrors( const std::function<void()>& aFunc )
}


void FOOTPRINT_LIST_IMPL::loader_job()
{
wxString nickname;

while( m_queue_in.pop( nickname ) && !m_cancelled )
{
CatchErrors( [this, &nickname]()
{
m_lib_table->PrefetchLib( nickname );
m_queue_out.push( nickname );
} );

m_count_finished.fetch_add( 1 );

if( m_progress_reporter )
m_progress_reporter->AdvanceProgress();
}
}


bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxString* aNickname,
PROGRESS_REPORTER* aProgressReporter )
{
@ -123,6 +103,9 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
if( generatedTimestamp == m_list_timestamp )
return true;

// Disable KIID generation: not needed for library parts; sometimes very slow
KIID_NIL_SET_RESET reset_kiid;

m_progress_reporter = aProgressReporter;

if( m_progress_reporter )
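
KIID_NIL_SET_RESET above reads like an RAII guard: constructing it switches UUID generation into a cheap nil mode for the duration of the load, and its destructor restores the previous behaviour even on early return. Below is a minimal sketch of that guard idiom with invented names; it is not the actual KiCad class.

#include <iostream>

// Global toggle standing in for the real "generate nil UUIDs" switch.
static bool g_useNilUuids = false;

class NIL_UUID_SCOPE
{
public:
    NIL_UUID_SCOPE() : m_saved( g_useNilUuids ) { g_useNilUuids = true; }
    ~NIL_UUID_SCOPE() { g_useNilUuids = m_saved; }   // restored on every exit path

private:
    bool m_saved;
};

static void loadLibraries()
{
    NIL_UUID_SCOPE resetGuard;   // cheap IDs while parsing library parts
    std::cout << "loading with nil UUIDs: " << g_useNilUuids << "\n";
}                                // guard destroyed here; normal UUIDs resume

int main()
{
    loadLibraries();
    std::cout << "after load, nil UUIDs: " << g_useNilUuids << "\n";
    return 0;
}
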
@ -132,25 +115,28 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
}

m_cancelled = false;
m_lib_table = aTable;

FOOTPRINT_ASYNC_LOADER loader;
// Clear data before reading files
m_errors.clear();
m_list.clear();
m_queue_in.clear();
m_queue_out.clear();

loader.SetList( this );
loader.Start( aTable, aNickname );

while( !m_cancelled && (int)m_count_finished.load() < m_loader->m_total_libs )
if( aNickname )
{
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
m_cancelled = true;

wxMilliSleep( 33 /* 30 FPS refresh rate */);
}

if( m_cancelled )
{
loader.Abort();
m_queue_in.push( *aNickname );
}
else
{
for( auto const& nickname : aTable->GetLogicalLibs() )
m_queue_in.push( nickname );
}


loadLibs();

if( !m_cancelled )
{
if( m_progress_reporter )
{
@ -159,7 +145,7 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
m_progress_reporter->Report( _( "Loading footprints..." ) );
}

loader.Join();
loadFootprints();

if( m_progress_reporter )
m_progress_reporter->AdvancePhase();
@ -174,75 +160,54 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
}


void FOOTPRINT_LIST_IMPL::startWorkers( FP_LIB_TABLE* aTable, wxString const* aNickname,
FOOTPRINT_ASYNC_LOADER* aLoader, unsigned aNThreads )
void FOOTPRINT_LIST_IMPL::loadLibs()
{
m_loader = aLoader;
m_lib_table = aTable;
thread_pool& tp = GetKiCadThreadPool();
size_t num_returns = m_queue_in.size();
std::vector<std::future<size_t>> returns( num_returns );

// Clear data before reading files
m_count_finished.store( 0 );
m_errors.clear();
m_list.clear();
m_threads.clear();
m_queue_in.clear();
m_queue_out.clear();
auto loader_job = [this]() -> size_t
{
wxString nickname;
size_t retval = 0;

if( aNickname )
if( !m_cancelled && m_queue_in.pop( nickname ) )
{
if( CatchErrors( [this, &nickname]()
{
m_lib_table->PrefetchLib( nickname );
m_queue_out.push( nickname );
} ) && m_progress_reporter )
{
m_progress_reporter->AdvanceProgress();
}

++retval;
}

return retval;
};

for( size_t ii = 0; ii < num_returns; ++ii )
returns[ii] = tp.submit( loader_job );

for( auto& retval : returns )
{
m_queue_in.push( *aNickname );
}
else
{
for( auto const& nickname : aTable->GetLogicalLibs() )
m_queue_in.push( nickname );
}
std::future_status status;

m_loader->m_total_libs = m_queue_in.size();
do
{
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
m_cancelled = true;

for( unsigned i = 0; i < aNThreads; ++i )
{
m_threads.emplace_back( &FOOTPRINT_LIST_IMPL::loader_job, this );
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
} while( status != std::future_status::ready );
}
}


void FOOTPRINT_LIST_IMPL::stopWorkers()
void FOOTPRINT_LIST_IMPL::loadFootprints()
{
std::lock_guard<std::mutex> lock1( m_join );

// To safely stop our workers, we set the cancellation flag (they will each
// exit on their next safe loop location when this is set). Then we need to wait
// for all threads to finish as closing the implementation will free the queues
// that the threads write to.
for( auto& i : m_threads )
i.join();

m_threads.clear();
m_queue_in.clear();
m_count_finished.store( 0 );

// If we have canceled in the middle of a load, clear our timestamp to re-load next time
if( m_cancelled )
m_list_timestamp = 0;
}


bool FOOTPRINT_LIST_IMPL::joinWorkers()
{
{
std::lock_guard<std::mutex> lock1( m_join );

for( auto& i : m_threads )
i.join();

m_threads.clear();
m_queue_in.clear();
m_count_finished.store( 0 );
}

size_t total_count = m_queue_out.size();

LOCALE_IO toggle_locale;

// Parse the footprints in parallel. WARNING! This requires changing the locale, which is
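
loadLibs() above pushes every library nickname into a thread-safe queue and then submits exactly one job per queued entry, each of which pops and prefetches a single nickname. The following is a minimal, self-contained sketch of that one-job-per-queued-item shape; the small locked queue below is an illustration only, not KiCad's SYNC_QUEUE, and std::async again stands in for the pool.

#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>
#include <vector>

// Minimal locked queue; pop() returns false when empty.
template <typename T>
class LOCKED_QUEUE
{
public:
    void push( const T& aValue )
    {
        std::lock_guard<std::mutex> lock( m_mutex );
        m_queue.push( aValue );
    }

    bool pop( T& aOut )
    {
        std::lock_guard<std::mutex> lock( m_mutex );

        if( m_queue.empty() )
            return false;

        aOut = m_queue.front();
        m_queue.pop();
        return true;
    }

private:
    std::mutex    m_mutex;
    std::queue<T> m_queue;
};

int main()
{
    LOCKED_QUEUE<std::string> queueIn;

    for( const char* nickname : { "Connector", "Resistor_SMD", "Package_QFP" } )
        queueIn.push( nickname );

    // One job per queued entry (three here); each job pops and "prefetches" one nickname.
    std::vector<std::future<size_t>> returns;

    for( size_t ii = 0; ii < 3; ++ii )
    {
        returns.emplace_back( std::async( std::launch::async, [&queueIn]() -> size_t
        {
            std::string nickname;

            if( queueIn.pop( nickname ) )
                return 1;       // real code would prefetch the library here

            return 0;
        } ) );
    }

    size_t loaded = 0;

    for( auto& retval : returns )
        loaded += retval.get();

    std::cout << loaded << " libraries prefetched\n";
    return 0;
}
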
@ -253,85 +218,58 @@ bool FOOTPRINT_LIST_IMPL::joinWorkers()
// TODO: blast LOCALE_IO into the sun

SYNC_QUEUE<std::unique_ptr<FOOTPRINT_INFO>> queue_parsed;
std::vector<std::thread> threads;
thread_pool& tp = GetKiCadThreadPool();
size_t num_elements = m_queue_out.size();
std::vector<std::future<size_t>> returns( num_elements );

for( size_t ii = 0; ii < std::thread::hardware_concurrency() + 1; ++ii )
auto fp_thread = [ this, &queue_parsed ]() -> size_t
{
threads.emplace_back( [this, &queue_parsed]() {
wxString nickname;
wxString nickname;

while( m_queue_out.pop( nickname ) && !m_cancelled )
if( m_queue_out.pop( nickname ) && !m_cancelled )
{
wxArrayString fpnames;

if( !CatchErrors( [&]()
{ m_lib_table->FootprintEnumerate( fpnames, nickname, false ); } ) )
{
wxArrayString fpnames;

try
{
m_lib_table->FootprintEnumerate( fpnames, nickname, false );
}
catch( const IO_ERROR& ioe )
{
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
}
catch( const std::exception& se )
{
// This is a round about way to do this, but who knows what THROW_IO_ERROR()
// may be tricked out to do someday, keep it in the game.
try
{
THROW_IO_ERROR( se.what() );
}
catch( const IO_ERROR& ioe )
{
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
}
}

for( unsigned jj = 0; jj < fpnames.size() && !m_cancelled; ++jj )
{
try
{
wxString fpname = fpnames[jj];
FOOTPRINT_INFO* fpinfo = new FOOTPRINT_INFO_IMPL( this, nickname, fpname );
queue_parsed.move_push( std::unique_ptr<FOOTPRINT_INFO>( fpinfo ) );
}
catch( const IO_ERROR& ioe )
{
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
}
catch( const std::exception& se )
{
// This is a round about way to do this, but who knows what THROW_IO_ERROR()
// may be tricked out to do someday, keep it in the game.
try
{
THROW_IO_ERROR( se.what() );
}
catch( const IO_ERROR& ioe )
{
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
}
}
}

if( m_progress_reporter )
m_progress_reporter->AdvanceProgress();

m_count_finished.fetch_add( 1 );
return 0;
}
} );
}

while( !m_cancelled && (size_t)m_count_finished.load() < total_count )
for( unsigned jj = 0; jj < fpnames.size() && !m_cancelled; ++jj )
{
CatchErrors( [&]()
{
FOOTPRINT_INFO* fpinfo = new FOOTPRINT_INFO_IMPL( this, nickname, fpnames[jj] );
queue_parsed.move_push( std::unique_ptr<FOOTPRINT_INFO>( fpinfo ) );
});
}

if( m_progress_reporter )
m_progress_reporter->AdvanceProgress();

return 1;
}

return 0;
};

for( size_t ii = 0; ii < num_elements; ++ii )
returns[ii] = tp.submit( fp_thread );

for( auto& retval : returns )
{
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
m_cancelled = true;
std::future_status status;

wxMilliSleep( 33 /* 30 FPS refresh rate */ );
do
{
if( m_progress_reporter )
m_progress_reporter->KeepRefreshing();

status = retval.wait_for( std::chrono::milliseconds( 100 ) );
} while( status != std::future_status::ready );
}

for( auto& thr : threads )
thr.join();

std::unique_ptr<FOOTPRINT_INFO> fpi;

while( queue_parsed.pop( fpi ) )
@ -343,14 +281,10 @@ bool FOOTPRINT_LIST_IMPL::joinWorkers()
{
return *lhs < *rhs;
} );

return m_errors.empty();
}


FOOTPRINT_LIST_IMPL::FOOTPRINT_LIST_IMPL() :
m_loader( nullptr ),
m_count_finished( 0 ),
m_list_timestamp( 0 ),
m_progress_reporter( nullptr ),
m_cancelled( false )
@ -358,12 +292,6 @@ FOOTPRINT_LIST_IMPL::FOOTPRINT_LIST_IMPL() :
}


FOOTPRINT_LIST_IMPL::~FOOTPRINT_LIST_IMPL()
{
stopWorkers();
}


void FOOTPRINT_LIST_IMPL::WriteCacheToFile( const wxString& aFilePath )
{
wxFileName tmpFileName = wxFileName::CreateTempFileName( aFilePath );