46 changes: 46 additions & 0 deletions Compiler/src/ssair/passes.jl
@@ -872,6 +872,49 @@ function perform_lifting!(compact::IncrementalCompact,
return Pair{Any, PhiNest}(stmt_val, PhiNest(visited_philikes, lifted_philikes, lifted_leaves, reverse_mapping, walker_callback))
end

function lift_apply_args!(compact::IncrementalCompact, idx::Int, stmt::Expr, 𝕃ₒ::AbstractLattice)
# Handle _apply_iterate calls: convert tuple arguments to `Core.svec`. Core.svec's boxed element representation better matches the ABI that codegen uses for this call.
compact[idx] = nothing
for i in 4:length(stmt.args) # args[1:3] are the _apply_iterate callee, the iterate function, and f
arg = stmt.args[i]
arg_type = argextype(arg, compact)
svec_args = nothing
if isa(arg_type, DataType) && arg_type.name === Tuple.name
if isa(arg, SSAValue)
arg_stmt = compact[arg][:stmt]
if is_known_call(arg_stmt, Core.tuple, compact)
svec_args = copy(arg_stmt.args)
end
end
if svec_args === nothing
# Fallback path: generate getfield calls for tuple elements
tuple_length = length(arg_type.parameters)
if tuple_length > 0 && !isvarargtype(arg_type.parameters[tuple_length])
svec_args = Vector{Any}(undef, tuple_length + 1)
for j in 1:tuple_length
getfield_call = Expr(:call, GlobalRef(Core, :getfield), arg, j)
getfield_type = arg_type.parameters[j]
inst = compact[SSAValue(idx)]
getfield_ssa = insert_node!(compact, SSAValue(idx), NewInstruction(getfield_call, getfield_type, NoCallInfo(), inst[:line], inst[:flag]))
svec_args[j + 1] = getfield_ssa
end
end
end
end
# Create Core.svec call if we have arguments
if svec_args !== nothing
svec_args[1] = GlobalRef(Core, :svec)
new_svec_call = Expr(:call)
new_svec_call.args = svec_args
inst = compact[SSAValue(idx)]
new_svec_ssa = insert_node!(compact, SSAValue(idx), NewInstruction(new_svec_call, SimpleVector, NoCallInfo(), inst[:line], inst[:flag]))
stmt.args[i] = new_svec_ssa
end
end
compact[idx] = stmt
nothing
end
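# Illustrative sketch (not part of this diff; the SSA names are made up): this pass rewrites
#     %tup = Core.tuple(x, y)
#     %ret = Core._apply_iterate(Base.iterate, f, %tup)
# into roughly
#     %sv  = Core.svec(x, y)
#     %ret = Core._apply_iterate(Base.iterate, f, %sv)
# and, when the producing Core.tuple call is not visible, falls back to
#     %sv  = Core.svec(Core.getfield(%tup, 1), Core.getfield(%tup, 2))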

function lift_svec_ref!(compact::IncrementalCompact, idx::Int, stmt::Expr)
length(stmt.args) != 3 && return

@@ -1375,6 +1418,9 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing)
compact[SSAValue(idx)] = (compact[enter_ssa][:stmt]::EnterNode).scope
elseif isexpr(stmt, :new)
refine_new_effects!(𝕃ₒ, compact, idx, stmt)
elseif is_known_call(stmt, Core._apply_iterate, compact)
length(stmt.args) >= 4 || continue
lift_apply_args!(compact, idx, stmt, 𝕃ₒ)
end
continue
end
43 changes: 28 additions & 15 deletions Compiler/src/tfuncs.jl
@@ -585,6 +585,15 @@ end
add_tfunc(nfields, 1, 1, nfields_tfunc, 1)
add_tfunc(Core._expr, 1, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->Expr), 100)
add_tfunc(svec, 0, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->SimpleVector), 20)

@nospecs function _svec_len_tfunc(𝕃::AbstractLattice, s)
if isa(s, Const) && isa(s.val, SimpleVector)
return Const(length(s.val))
end
return Int
end
add_tfunc(Core._svec_len, 1, 1, _svec_len_tfunc, 1)
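# Illustrative example (not part of this diff): with a constant argument the new tfunc folds,
# e.g. _svec_len_tfunc(𝕃, Const(Core.svec(:a, :b))) returns Const(2); for a non-constant
# SimpleVector argument it widens to Int.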

@nospecs function _svec_ref_tfunc(𝕃::AbstractLattice, s, i)
if isa(s, Const) && isa(i, Const)
s, i = s.val, i.val
@@ -1986,15 +1995,8 @@ function tuple_tfunc(𝕃::AbstractLattice, argtypes::Vector{Any})
# UnionAll context is missing around this.
pop!(argtypes)
end
all_are_const = true
for i in 1:length(argtypes)
if !isa(argtypes[i], Const)
all_are_const = false
break
end
end
if all_are_const
return Const(ntuple(i::Int->argtypes[i].val, length(argtypes)))
if is_all_const_arg(argtypes, 1) # repeated from builtin_tfunction for the benefit of callers that use this tfunc directly
return Const(tuple(collect_const_args(argtypes, 1)...))
end
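# (Illustrative, not part of this diff: e.g. tuple_tfunc(𝕃, Any[Const(1), Const(:a)]) now returns Const((1, :a)) via this path.)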
params = Vector{Any}(undef, length(argtypes))
anyinfo = false
@@ -2359,14 +2361,17 @@ function _builtin_nothrow(𝕃::AbstractLattice, @nospecialize(f::Builtin), argt
elseif f === Core.compilerbarrier
na == 2 || return false
return compilerbarrier_nothrow(argtypes[1], nothing)
elseif f === Core._svec_len
na == 1 || return false
return _svec_len_tfunc(𝕃, argtypes[1]) isa Const
elseif f === Core._svec_ref
na == 2 || return false
return _svec_ref_tfunc(𝕃, argtypes[1], argtypes[2]) isa Const
end
return false
end

# known to be always effect-free (in particular nothrow)
# known to be always effect-free (in particular also nothrow)
const _PURE_BUILTINS = Any[
tuple,
svec,
Expand Down Expand Up @@ -2395,6 +2400,8 @@ const _CONSISTENT_BUILTINS = Any[
donotdelete,
memoryrefnew,
memoryrefoffset,
Core._svec_len,
Core._svec_ref,
]

# known to be effect-free (but not necessarily nothrow)
@@ -2419,6 +2426,7 @@ const _EFFECT_FREE_BUILTINS = [
Core.throw_methoderror,
getglobal,
compilerbarrier,
Core._svec_len,
Core._svec_ref,
]

@@ -2453,6 +2461,7 @@ const _ARGMEM_BUILTINS = Any[
replacefield!,
setfield!,
swapfield!,
Core._svec_len,
Core._svec_ref,
]

@@ -2637,7 +2646,7 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty
else
if contains_is(_CONSISTENT_BUILTINS, f)
consistent = ALWAYS_TRUE
elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned || f === Core._svec_ref
elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned || f === Core._svec_len || f === Core._svec_ref
consistent = CONSISTENT_IF_INACCESSIBLEMEMONLY
elseif f === Core._typevar || f === Core.memorynew
consistent = CONSISTENT_IF_NOTRETURNED
@@ -2746,11 +2755,12 @@ end
function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtypes::Vector{Any},
sv::Union{AbsIntState, Nothing})
𝕃ᵢ = typeinf_lattice(interp)
if isa(f, IntrinsicFunction)
if is_pure_intrinsic_infer(f) && all(@nospecialize(a) -> isa(a, Const), argtypes)
argvals = anymap(@nospecialize(a) -> (a::Const).val, argtypes)
# Early constant evaluation for foldable builtins with all const args
if isa(f, IntrinsicFunction) ? is_pure_intrinsic_infer(f) : (f in _PURE_BUILTINS || (f in _CONSISTENT_BUILTINS && f in _EFFECT_FREE_BUILTINS))
if is_all_const_arg(argtypes, 1)
argvals = collect_const_args(argtypes, 1)
try
# unroll a few cases which have specialized codegen
# unroll a few common cases for better codegen
if length(argvals) == 1
return Const(f(argvals[1]))
elseif length(argvals) == 2
@@ -2764,6 +2774,8 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp
return Bottom
end
end
end
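# Illustrative (not part of this diff): under the widened condition above, a foldable builtin with
# all-constant arguments, e.g. f === Core._svec_len with argtypes Any[Const(Core.svec(1, 2))],
# is now evaluated eagerly here and returns Const(2).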
if isa(f, IntrinsicFunction)
iidx = Int(reinterpret(Int32, f)) + 1
if iidx < 0 || iidx > length(T_IFUNC)
# unknown intrinsic
@@ -2790,6 +2802,7 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp
end
tf = T_FFUNC_VAL[fidx]
end

if hasvarargtype(argtypes)
if length(argtypes) - 1 > tf[2]
# definitely too many arguments
6 changes: 3 additions & 3 deletions Compiler/test/codegen.jl
@@ -133,14 +133,14 @@ if !is_debug_build && opt_level > 0
# Array
test_loads_no_call(strip_debug_calls(get_llvm(sizeof, Tuple{Vector{Int}})), [Iptr])
# As long as the eltype is known we don't need to load the elsize, but do need to check isvector
@test_skip test_loads_no_call(strip_debug_calls(get_llvm(sizeof, Tuple{Array{Any}})), ["atomic $Iptr", "ptr", "ptr", Iptr, Iptr, "ptr", Iptr])
@test_skip test_loads_no_call(strip_debug_calls(get_llvm(sizeof, Tuple{Array{Any}})), ["atomic volatile $Iptr", "ptr", "ptr", Iptr, Iptr, "ptr", Iptr])
# Memory
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory{Int}})), [Iptr])
# As long as the eltype is known we don't need to load the elsize
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory{Any}})), [Iptr])
# Check that we load the elsize and isunion from the typeof layout
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory})), [Iptr, "atomic $Iptr", "ptr", "i32", "i16"])
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory})), [Iptr, "atomic $Iptr", "ptr", "i32", "i16"])
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory})), [Iptr, "atomic volatile $Iptr", "ptr", "i32", "i16"])
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Memory})), [Iptr, "atomic volatile $Iptr", "ptr", "i32", "i16"])
# Primitive Type size should be folded to a constant
test_loads_no_call(strip_debug_calls(get_llvm(core_sizeof, Tuple{Ptr})), String[])

2 changes: 1 addition & 1 deletion Compiler/test/effects.jl
@@ -1466,7 +1466,7 @@ end
let effects = Base.infer_effects((Core.SimpleVector,Int); optimize=false) do svec, i
Core._svec_ref(svec, i)
end
@test !Compiler.is_consistent(effects)
@test Compiler.is_consistent(effects)
@test Compiler.is_effect_free(effects)
@test !Compiler.is_nothrow(effects)
@test Compiler.is_terminates(effects)
1 change: 1 addition & 0 deletions base/Base.jl
@@ -174,6 +174,7 @@ using .Filesystem
include("cmd.jl")
include("process.jl")
include("terminfo.jl")
include("Terminals.jl") # Moved from REPL to reduce invalidations
include("secretbuffer.jl")

# core math functions
File renamed without changes.
6 changes: 1 addition & 5 deletions base/essentials.jl
@@ -934,11 +934,7 @@ setindex!(A::MemoryRef{Any}, @nospecialize(x)) = (memoryrefset!(A, x, :not_atomi

getindex(v::SimpleVector, i::Int) = (@_foldable_meta; Core._svec_ref(v, i))
function length(v::SimpleVector)
@_total_meta
t = @_gc_preserve_begin v
len = unsafe_load(Ptr{Int}(pointer_from_objref(v)))
@_gc_preserve_end t
return len
Core._svec_len(v)
end
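# Illustrative note (not part of this diff): with the effects given to Core._svec_len in
# Compiler/src/tfuncs.jl above, a call such as length(Core.svec(1, 2, 3)) can now
# constant-fold to 3 during inference.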
firstindex(v::SimpleVector) = 1
lastindex(v::SimpleVector) = length(v)
7 changes: 5 additions & 2 deletions base/sort.jl
@@ -563,12 +563,15 @@ function _sort!(v::UnwrappableSubArray, a::SubArrayOptimization, o::Ordering, kw
@getkw lo hi
# @assert v.stride1 == 1
parent = v.parent
if parent isa Array && !(parent isa Vector) && hi - lo < 100
if parent isa Array && !(parent isa Vector) && hi - lo < 100 || !iszero(v.offset1)
# vec(::Array{T, ≠1}) allocates and is therefore somewhat expensive.
# We don't want that for small inputs.

# Additionally, if offset1 is non-zero, then this optimization is incompatible with
# algorithms that track absolute first and last indices (e.g. ScratchQuickSort)
_sort!(v, a.next, o, kw)
else
_sort!(vec(parent), a.next, o, (;kw..., lo = lo + v.offset1, hi = hi + v.offset1))
_sort!(vec(parent), a.next, o, kw)
end
end

2 changes: 1 addition & 1 deletion base/stream.jl
@@ -378,7 +378,7 @@ end

function isopen(x::Union{LibuvStream, LibuvServer})
if x.status == StatusUninit || x.status == StatusInit || x.handle === C_NULL
throw(ArgumentError("$x is not initialized"))
throw(ArgumentError("stream not initialized"))
end
return x.status != StatusClosed
end
8 changes: 8 additions & 0 deletions contrib/generate_precompile.jl
@@ -34,6 +34,14 @@ hardcoded_precompile_statements = """
precompile(Base.unsafe_string, (Ptr{UInt8},))
precompile(Base.unsafe_string, (Ptr{Int8},))
# used by REPL
precompile(Tuple{typeof(Base.getproperty), Base.Terminals.TTYTerminal, Symbol})
precompile(Tuple{typeof(Base.reseteof), Base.Terminals.TTYTerminal})
precompile(Tuple{typeof(Base.Terminals.enable_bracketed_paste), Base.Terminals.TTYTerminal})
precompile(Tuple{typeof(Base.Terminals.width), Base.Terminals.TTYTerminal})
precompile(Tuple{typeof(Base.Terminals.height), Base.Terminals.TTYTerminal})
precompile(Tuple{typeof(Base.write), Base.Terminals.TTYTerminal, Array{UInt8, 1}})
# loading.jl - without these each precompile worker would precompile these because they're hit before pkgimages are loaded
precompile(Base.__require, (Module, Symbol))
precompile(Base.__require, (Base.PkgId,))
1 change: 1 addition & 0 deletions deps/libssh2.mk
@@ -22,6 +22,7 @@ LIBSSH2_OPTS += -G"MSYS Makefiles"
endif
else
LIBSSH2_OPTS += -DCRYPTO_BACKEND=OpenSSL -DENABLE_ZLIB_COMPRESSION=OFF
LIBSSH2_OPTS += -DOPENSSL_ROOT_DIR=$(build_prefix)
endif

ifneq (,$(findstring $(OS),Linux FreeBSD OpenBSD))
19 changes: 13 additions & 6 deletions deps/openssl.mk
@@ -72,14 +72,21 @@ ifeq ($(OS),$(BUILD_OS))
endif
echo 1 > $@

# Override bindir and only install runtime libraries, otherwise they'll go into build_depsbindir.
OPENSSL_INSTALL = \
mkdir -p $2$$(build_shlibdir) && \
$$(MAKE) -C $1 install_dev $$(MAKE_COMMON) bindir=$$(build_shlibdir) $3 DESTDIR="$2"

OPENSSL_POST_INSTALL := \
$(WIN_MAKE_HARD_LINK) $(build_bindir)/libcrypto-*.dll $(build_bindir)/libcrypto.dll && \
$(WIN_MAKE_HARD_LINK) $(build_bindir)/libssl-*.dll $(build_bindir)/libssl.dll && \
$(INSTALL_NAME_CMD)libcrypto.$(SHLIB_EXT) $(build_shlibdir)/libcrypto.$(SHLIB_EXT) && \
$(INSTALL_NAME_CMD)libssl.$(SHLIB_EXT) $(build_shlibdir)/libssl.$(SHLIB_EXT) && \
$(INSTALL_NAME_CHANGE_CMD) $(build_shlibdir)/libcrypto.3.dylib @rpath/libcrypto.$(SHLIB_EXT) $(build_shlibdir)/libssl.$(SHLIB_EXT)

$(eval $(call staged-install, \
openssl,openssl-$(OPENSSL_VER), \
MAKE_INSTALL,,, \
$$(WIN_MAKE_HARD_LINK) $(build_bindir)/libcrypto-*.dll $(build_bindir)/libcrypto.dll && \
$$(WIN_MAKE_HARD_LINK) $(build_bindir)/libssl-*.dll $(build_bindir)/libssl.dll && \
$$(INSTALL_NAME_CMD)libcrypto.$$(SHLIB_EXT) $$(build_shlibdir)/libcrypto.$$(SHLIB_EXT) && \
$$(INSTALL_NAME_CMD)libssl.$$(SHLIB_EXT) $$(build_shlibdir)/libssl.$$(SHLIB_EXT) && \
$$(INSTALL_NAME_CHANGE_CMD) $$(build_shlibdir)/libcrypto.3.dylib @rpath/libcrypto.$$(SHLIB_EXT) $$(build_shlibdir)/libssl.$$(SHLIB_EXT)))
OPENSSL_INSTALL,,,$(OPENSSL_POST_INSTALL)))

clean-openssl:
-rm -f $(BUILDDIR)/-openssl-$(OPENSSL_VER)/build-configured $(BUILDDIR)/-openssl-$(OPENSSL_VER)/build-compiled
4 changes: 2 additions & 2 deletions doc/src/base/base.md
@@ -112,14 +112,14 @@ where
[]
```

## Standard Modules
## [Standard Modules](@id standard-modules)
```@docs
Main
Core
Base
```

## Base Submodules
## [Base Submodules](@id base-submodules)
```@docs
Base.Broadcast
Base.Docs
39 changes: 38 additions & 1 deletion doc/src/index.md
@@ -76,7 +76,7 @@ and [Ruby](https://en.wikipedia.org/wiki/Ruby_(programming_language)).

The most significant departures of Julia from typical dynamic languages are:

* The core language imposes very little; Julia Base and the standard library are written in Julia itself, including
* The core language imposes very little; [Julia Base and the standard library](@ref man-core-base-and-stdlib) are written in Julia itself, including
primitive operations like integer arithmetic
* A rich language of types for constructing and describing objects, that can also optionally be
used to make type declarations
@@ -126,3 +126,40 @@ language. In addition to the above, some advantages of Julia over comparable sys
* Call C functions directly (no wrappers or special APIs needed)
* Powerful shell-like capabilities for managing other processes
* Lisp-like macros and other metaprogramming facilities

## [Julia Standard Modules and the Standard Library](@id man-standard-modules-stdlib)

The Julia runtime comes with [standard modules](@ref standard-modules),
which are essential namespaces that are usually loaded automatically.

```@docs; canonical=false
Core
Base
```

Julia's `Base` module contains various [useful submodules](@ref base-submodules).

### [The Standard Library](@id man-stdlib)

The Julia standard library contains additional, commonly used packages that are installed alongside the Julia runtime by default.
To use a standard library package, it is first necessary to load the package with a [`using`](@ref) or [`import`](@ref) statement.
Links to available standard library packages are provided below,
and may also be found in the website sidebar.
Their source code is available in the `Sys.STDLIB` directory of a Julia installation.

```@eval
import Markdown
list = sort(filter(x -> match(r"_jll$", x) === nothing, readdir(Sys.STDLIB)))
Markdown.parse(join("- [`" .* list .* "`](stdlib/" .* list .* ".html)", "\n"))
```
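
For example (purely as an illustration; any of the packages listed above is loaded the same way), the `Dates` standard library can be loaded and used directly:

```julia
using Dates

d = Date(2024, 1, 1)  # construct a calendar date
month(d)              # returns 1
```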

Julia also provides various standard, pre-built binary libraries
of established software that is written in other languages.
By convention, these packages have names that end with `_jll`.
The [`using`](@ref) statement can load symbol names from these binary libraries:

```@eval
import Markdown
list = sort(filter(x -> match(r"_jll$", x) !== nothing, readdir(Sys.STDLIB)))
Markdown.parse(join("- [`" .* list .* "`](stdlib/" .* list .* ".html)", "\n"))
```
1 change: 1 addition & 0 deletions src/builtin_proto.h
@@ -19,6 +19,7 @@ extern "C" {
XX(_primitivetype,"_primitivetype") \
XX(_setsuper,"_setsuper!") \
XX(_structtype,"_structtype") \
XX(_svec_len,"_svec_len") \
XX(_svec_ref,"_svec_ref") \
XX(_typebody,"_typebody!") \
XX(_typevar,"_typevar") \