py-heptapod · Commit 9f6ea791b57a
Authored 3 years ago by Georges Racinet
WIP repo_cache LRU based on RSS
Parent: 2f8a596c2ee8
No related branches, tags, or merge requests found.
Pipeline #27876: canceled 3 years ago (stages: main, compat)
Showing 2 changed files with 73 additions and 3 deletions:

  heptapod/wsgi.py (4 additions, 3 deletions)
  hgext3rd/heptapod/repo_cache.py (69 additions, 0 deletions)
heptapod/wsgi.py  +4 −3
@@ -64,8 +64,6 @@
     'PROJECT_NAMESPACE_FULL_PATH',
 )
 
-_repo_instance_cache = dict()  # TODO some appropriate kind of LRU
-
 
 class HgServe(object):
     """WSGI application serving repositories under a given root path
@@ -102,6 +100,9 @@
         if root is None:
            raise ValueError("heptapod.repositories-root is not configured.")
         self.repos_root = root
+        self.repo_instances = repo_cache.LruProcessRssCache(68 << 20)
+        logger.info("LRU cache for repository instances initialized for "
+                    "pid=%d", os.getpid())
 
     def apply_heptapod_headers(self, environ):
         perm_user = environ.get('HTTP_X_HEPTAPOD_PERMISSION_USER')
@@ -146,7 +147,7 @@
             # the message)
             raise ErrorResponse(HTTP_NOT_FOUND, "Not Found")
         logger.info("loading repo at %r", repo_path)
-        repo = repo_cache.get_repo(_repo_instance_cache, self.ui, repo_path)
+        repo = repo_cache.get_repo(self.repo_instances, self.ui, repo_path)
         # setting native mode, as a string so that standard hg boolean
         # synonyms (yes, true, etc.) just work as usual.
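Note on the new call in HgServe.__init__: the constructor argument is a number of bytes, and psutil is what the cache reads the resident set size (RSS) from. A minimal standalone sketch, assuming psutil is installed; the printed comparison below is illustrative and not part of the commit:

    import os

    import psutil

    # Illustrative only: the threshold passed to LruProcessRssCache is in bytes.
    rss_threshold = 68 << 20    # 68 * 2**20 bytes, i.e. 68 MiB
    assert rss_threshold == 71303168

    # This is how the cache samples the current resident set size (RSS).
    current_rss = psutil.Process(os.getpid()).memory_info().rss
    print("RSS %d MiB, threshold %d MiB"
          % (current_rss >> 20, rss_threshold >> 20))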
hgext3rd/heptapod/repo_cache.py  +69 −0
@@ -15,6 +15,7 @@
 transaction, but it is still possible in Mercurial (and extensions)
 to change repository data without a transaction.
 """
+import gc
 import logging
 import os
 from uuid import uuid4
@@ -18,8 +19,9 @@
 import logging
 import os
 from uuid import uuid4
+import psutil
 import weakref
 
 from mercurial import (
     error,
     hg,
@@ -21,8 +23,9 @@
 import weakref
 
 from mercurial import (
     error,
     hg,
+    util,
 )
 from mercurial.repoview import _filteredrepotypes
@@ -33,6 +36,72 @@
 MUTATION_ID_ATTR = '_instantiation_mutation_id'
 
 
+class LruProcessRssCache(util.lrucachedict):
+    """A LRU cache that drops entries relatively to the process memory usage.
+
+    rss_threshold: size taken in resident memory (bytes) above which the
+       LRU cache will start dropping objects. Not to be mistaken with an
+       actual total memory usage.
+    """
+
+    drop_ratio = 0.5
+
+    def __init__(self, rss_threshold):
+        self.process = psutil.Process(os.getpid())
+        # hack: max (self.capacity) will be ignored entirely
+        # max_cost != 0 is enough to trigger _enforcecostlimit, which
+        # will run independently of per-item cost
+        # of course it would be better to have options to do this
+        # naturally in the base class
+        self.rss_threshold = rss_threshold
+        return super(LruProcessRssCache, self).__init__(max=10, maxcost=1)
+
+    def _addcapacity(self):
+        node = super(LruProcessRssCache, self)._addcapacity()
+        # make it so that size stagnates (it is used only to check if
+        # the LRU cache reached the max size, which is irrelevant for our case)
+        self._size = 1
+        return node
+
+    def _enforcecostlimit(self):
+        # this is called after allocation of the latest object stored in
+        # cache (obviously, since *that* happens even before attempting to
+        # keep in LRU)
+        initial_len = len(self)
+        current_rss = self.process.memory_info().rss
+        if initial_len <= 1 or current_rss < self.rss_threshold:
+            return
+
+        target_len = max(1, int(initial_len * self.drop_ratio))
+        logger.warning("Current RSS %d (%d MB) is above threshold %dMB. "
+                       "Dropping %d repository instances from cache",
+                       current_rss, current_rss >> 20, self.rss_threshold,
+                       initial_len - target_len)
+
+        # taken from the superclass
+        n = self._head.prev
+        while n.key is util._notset:
+            n = n.prev
+
+        # memory reclamation might not be immediate, especially in the face
+        # of reference cycles. Let's just drop a fixed percentage of objects
+        # and see what happens next time.
+        while len(self) > target_len:
+            del self._cache[n.key]
+            # we don't care about totalcost, it would be fake
+            # self.totalcost -= n.cost
+
+            # the node list itself isn't trimmed (same is true in superclass)
+            # obviously that assumes its own cost is negligible compared
+            # to actual content.
+            n.markempty()
+            n = n.prev
+
+        logger.info("Right after dropping repository instances from cache, "
+                    "and calling GC, RSS is at %dMB",
+                    self.process.memory_info().rss >> 20)
+
+
 def acknowledge_mutation(repo):
     """Write a unique cache identifier to the repository.
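The shrink target in _enforcecostlimit is driven by drop_ratio: once RSS exceeds the threshold, the cache is cut down to roughly half of its current length, and never below the single most recently inserted entry. A worked sketch of that arithmetic (standalone, values illustrative):

    # Illustrative only: the target length computed by _enforcecostlimit.
    drop_ratio = 0.5

    def shrink_target(initial_len):
        # Keep at least one entry: the repository that was just inserted.
        return max(1, int(initial_len * drop_ratio))

    assert shrink_target(8) == 4    # 4 of 8 cached repositories are dropped
    assert shrink_target(3) == 1    # int(1.5) == 1, so 2 of 3 are dropped
    # With initial_len <= 1 the method returns early and nothing is dropped.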
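How the eviction hook gets triggered: as the in-code comment notes, util.lrucachedict only runs _enforcecostlimit() when maxcost is non-zero, which is why the constructor passes maxcost=1 even though per-item costs are never used. A minimal usage sketch, assuming the mercurial and psutil packages are importable and hgext3rd.heptapod is on the Python path; the keys and stand-in values are illustrative:

    from hgext3rd.heptapod import repo_cache

    # 68 MiB threshold, same value as in HgServe.__init__
    cache = repo_cache.LruProcessRssCache(68 << 20)

    # Each insertion goes through lrucachedict.insert(); since maxcost != 0,
    # _enforcecostlimit() runs and compares the process RSS to rss_threshold
    # instead of summing per-item costs.
    cache['group/project-1'] = object()    # stand-in for a localrepository
    cache['group/project-2'] = object()

    # Still 2 entries unless this process' RSS already exceeds 68 MiB,
    # in which case the least recently used entries have been dropped.
    print(len(cache))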